diff --git a/go.mod b/go.mod
index 44a8a8a33fe8b1bb46eac6ae634f0fd4c6da956e..96ef5ca72a1c48155ce922663a1426700abf541a 100644
--- a/go.mod
+++ b/go.mod
@@ -11,34 +11,30 @@ require (
 	github.com/google/uuid v1.1.1
 	github.com/hashicorp/golang-lru v0.5.3 // indirect
 	github.com/imdario/mergo v0.3.8 // indirect
-	github.com/openshift/api v0.0.0-20200127192224-ffde1bfabb9f
-	github.com/openshift/client-go v0.0.0-20190617165122-8892c0adc000
-	github.com/openshift/cluster-version-operator v3.11.1-0.20190629164025-08cac1c02538+incompatible
+	github.com/openshift/api v0.0.0-20200210091934-a0e53e94816b
+	github.com/openshift/client-go v0.0.0-20200116152001-92a2713fa240
+	github.com/openshift/library-go v0.0.0-00010101000000-000000000000
 	github.com/operator-framework/operator-sdk v0.5.1-0.20190301204940-c2efe6f74e7b
 	github.com/pkg/errors v0.8.1
-	github.com/prometheus/client_golang v1.0.0
+	github.com/prometheus/client_golang v1.1.0
 	github.com/prometheus/common v0.6.0 // indirect
 	github.com/prometheus/procfs v0.0.5 // indirect
 	github.com/spf13/cobra v0.0.5
 	github.com/stretchr/testify v1.4.0
 	github.com/vmware/govmomi v0.21.0
 	go.uber.org/atomic v1.4.0 // indirect
-	go.uber.org/zap v1.10.0 // indirect
-	golang.org/x/net v0.0.0-20191209160850-c0dbc17a3553
+	golang.org/x/net v0.0.0-20200202094626-16171245cfb2
 	golang.org/x/sys v0.0.0-20190911201528-7ad0cfa0b7b5 // indirect
-	golang.org/x/time v0.0.0-20190308202827-9d24e82272b4 // indirect
 	gonum.org/v1/gonum v0.0.0-20190915125329-975d99cd20a9 // indirect
 	google.golang.org/appengine v1.6.1 // indirect
 	gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15 // indirect
 	gopkg.in/gcfg.v1 v1.2.3
 	gopkg.in/warnings.v0 v0.1.2 // indirect
 	k8s.io/api v0.17.2
-	k8s.io/apiextensions-apiserver v0.0.0-20190918201827-3de75813f604 // indirect
 	k8s.io/apimachinery v0.17.2
 	k8s.io/client-go v11.0.1-0.20190409021438-1a26190bd76a+incompatible
 	k8s.io/code-generator v0.17.2
 	k8s.io/klog v1.0.0
-	k8s.io/kube-aggregator v0.0.0-20190404125450-f5e124c822d6 // indirect
 	k8s.io/kubectl v0.0.0-20200124035537-9f7d91504e51
 	k8s.io/utils v0.0.0-20191217005138-9e5e9d854fcc
 	sigs.k8s.io/controller-runtime v0.3.1-0.20191016212439-2df793d02076
@@ -51,7 +47,11 @@ replace github.com/prometheus/client_golang => github.com/prometheus/client_gola
 
 replace sigs.k8s.io/controller-tools => sigs.k8s.io/controller-tools v0.2.2-0.20190919191502-76a25b63325a
 
-replace github.com/openshift/client-go => github.com/openshift/client-go v0.0.0-20190923180330-3b6373338c9b
+replace github.com/openshift/client-go => github.com/openshift/client-go v0.0.0-20200116152001-92a2713fa240
+
+replace github.com/openshift/library-go => github.com/openshift/library-go v0.0.0-20200921144613-67f7770bf823
+
+replace github.com/openshift/api => github.com/openshift/api v0.0.0-20200618202633-7192180f496a
 
 // pinning to kubernetes-1.16.0
diff --git a/go.sum b/go.sum
index 19422d6ae4b1ba9ce0c7ae4f222d1f480faa85e8..9dd9d116ec27cb34c55e81a8eac921d0c33f2e7a 100644
--- a/go.sum
+++ b/go.sum
@@ -13,6 +13,7 @@ github.com/Azure/go-autorest/tracing v0.5.0/go.mod h1:r/s2XiOKccPW3HrqB+W0TQzfbt
 github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU=
 github.com/BurntSushi/xgb v0.0.0-20160522181843-27f122750802/go.mod h1:IVnqGOEym/WlBOVXweHU+Q+/VP0lqqI8lqeDx9IjBqo=
 github.com/MakeNowJust/heredoc v0.0.0-20170808103936-bb23615498cd/go.mod h1:64YHyfSL2R96J44Nlwm39UHepQbyR5q10x7iYa1ks2E=
+github.com/Microsoft/go-winio v0.4.11/go.mod h1:VhR8bwka0BXejwEJY73c50VrPtXAaKcyvVC4A4RozmA=
 github.com/NYTimes/gziphandler
v0.0.0-20170623195520-56545f4a5d46/go.mod h1:3wb06e3pkSAbeQ52E9H9iFoQsEEwGN64994WTCIhntQ= github.com/PuerkitoBio/purell v1.0.0/go.mod h1:c11w/QuzBsJSee3cPx9rAFu61PvFxuPbtSwDGJws/X0= github.com/PuerkitoBio/purell v1.1.0/go.mod h1:c11w/QuzBsJSee3cPx9rAFu61PvFxuPbtSwDGJws/X0= @@ -28,11 +29,15 @@ github.com/asaskevich/govalidator v0.0.0-20190424111038-f61b66f89f4a/go.mod h1:l github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q= github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM= github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw= +github.com/bgentry/speakeasy v0.1.0/go.mod h1:+zsyZBPWlz7T6j88CTgSN5bM796AkVf0kBD4zp0CCIs= github.com/blang/semver v3.5.0+incompatible/go.mod h1:kRBLl5iJ+tD4TcOOxsy/0fnwebNt5EWlYSAyrTnjyyk= github.com/blang/semver v3.5.1+incompatible h1:cQNTCjp13qL8KC3Nbxr/y2Bqb63oX6wdnnjpJbkM4JQ= github.com/blang/semver v3.5.1+incompatible/go.mod h1:kRBLl5iJ+tD4TcOOxsy/0fnwebNt5EWlYSAyrTnjyyk= +github.com/certifi/gocertifi v0.0.0-20180905225744-ee1a9a0726d2/go.mod h1:GJKEexRPVJrBSOjoqN5VNOIKJ5Q3RViH6eu3puDRwx4= github.com/chai2010/gettext-go v0.0.0-20160711120539-c6fed771bfd5/go.mod h1:/iP1qXHoty45bqomnu2LM+VVyAEdWN+vtSHGlQgyxbw= github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw= +github.com/cockroachdb/datadriven v0.0.0-20190809214429-80d97fb3cbaa/go.mod h1:zn76sxSg3SzpJ0PPJaLDCu+Bu0Lg3sKTORVIj19EIF8= +github.com/containerd/continuity v0.0.0-20190827140505-75bee3e2ccb6/go.mod h1:GL3xCUCBDV3CZiTSEKksMWbLE66hEyuu9qyDOOqM47Y= github.com/coreos/bbolt v1.3.1-coreos.6/go.mod h1:iRUV2dpdMOn7Bo10OQBFzIJO9kkE559Wcmn+qkEiiKk= github.com/coreos/etcd v3.3.10+incompatible/go.mod h1:uF7uidLiAD3TWHmW31ZFd/JWoc32PjwdhPthX9715RE= github.com/coreos/etcd v3.3.15+incompatible/go.mod h1:uF7uidLiAD3TWHmW31ZFd/JWoc32PjwdhPthX9715RE= @@ -41,8 +46,11 @@ github.com/coreos/go-oidc v2.1.0+incompatible/go.mod h1:CgnwVTmzoESiwO9qyAFEMiHo github.com/coreos/go-semver v0.2.0/go.mod h1:nnelYz7RCh+5ahJtPPxZlU+153eP4D4r3EedlOD2RNk= github.com/coreos/go-semver v0.3.0/go.mod h1:nnelYz7RCh+5ahJtPPxZlU+153eP4D4r3EedlOD2RNk= github.com/coreos/go-systemd v0.0.0-20180511133405-39ca1b05acc7/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4= +github.com/coreos/go-systemd v0.0.0-20190321100706-95778dfbb74e/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4= +github.com/coreos/pkg v0.0.0-20160727233714-3ac0863d7acf/go.mod h1:E3G3o1h8I7cfcXa63jLwjI0eiQQMgzzUDFVpN/nH/eA= github.com/coreos/pkg v0.0.0-20180108230652-97fdf19511ea/go.mod h1:E3G3o1h8I7cfcXa63jLwjI0eiQQMgzzUDFVpN/nH/eA= github.com/cpuguy83/go-md2man v1.0.10/go.mod h1:SmD6nW6nTyfqj6ABTjUi3V3JVMnlJmwcJI5acqYI6dE= +github.com/creack/pty v1.1.7/go.mod h1:lj5s0c3V2DBrqTV7llrYr5NG6My20zk30Fl46Y7DoTY= github.com/davecgh/go-spew v0.0.0-20151105211317-5215b55f46b2/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= @@ -50,11 +58,18 @@ github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSs github.com/davecgh/go-xdr v0.0.0-20161123171359-e6a2ba005892/go.mod h1:CTDl0pzVzE5DEzZhPfvhY/9sPFMQIxaJ9VAMs9AagrE= github.com/daviddengcn/go-colortext v0.0.0-20160507010035-511bcaf42ccd/go.mod h1:dv4zxwHi5C/8AeI+4gX4dCWOIvNi7I6JCSX0HvlKPgE= github.com/dgrijalva/jwt-go v3.2.0+incompatible/go.mod 
h1:E3ru+11k8xSBh+hMPgOLZmtrrCbhqsmaPHjLKYnJCaQ= +github.com/docker/distribution v0.0.0-20180920194744-16128bbac47f/go.mod h1:J2gT2udsDAN96Uj4KfcMRqY0/ypR+oyYUYmja8H+y+w= github.com/docker/distribution v2.7.1+incompatible/go.mod h1:J2gT2udsDAN96Uj4KfcMRqY0/ypR+oyYUYmja8H+y+w= github.com/docker/docker v0.7.3-0.20190327010347-be7ac8be2ae0/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk= +github.com/docker/go-connections v0.3.0/go.mod h1:Gbd7IOopHjR8Iph03tsViu4nIes5XhDvyHbTtUxmeec= +github.com/docker/go-metrics v0.0.1/go.mod h1:cG1hvH2utMXtqgqqYE9plW6lDxS3/5ayHzueweSI3Vw= github.com/docker/go-units v0.3.3/go.mod h1:fgPhTUdO+D/Jk86RDLlptpiXQzgHJF7gydDDbaIK4Dk= +github.com/docker/libnetwork v0.0.0-20190731215715-7f13a5c99f4b/go.mod h1:93m0aTqz6z+g32wla4l4WxTrdtvBRmVzYRkYvasA5Z8= +github.com/docker/libtrust v0.0.0-20160708172513-aabc10ec26b7/go.mod h1:cyGadeNEkKy96OOhEzfZl+yxihPEzKnqJwvfuSUqbZE= github.com/docker/spdystream v0.0.0-20160310174837-449fdfce4d96/go.mod h1:Qh8CwZgvJUkLughtfhJv5dyTYa91l1fOUCrgjqmcifM= github.com/docopt/docopt-go v0.0.0-20180111231733-ee0de3bc6815/go.mod h1:WwZ+bS3ebgob9U8Nd0kOddGdZWjyMGR8Wziv+TBNwSE= +github.com/dustin/go-humanize v0.0.0-20171111073723-bb3d318650d4/go.mod h1:HtrtbFcZ19U5GC7JDqmcUSB87Iq5E25KnS6fMYU6eOk= +github.com/dustin/go-humanize v1.0.0/go.mod h1:HtrtbFcZ19U5GC7JDqmcUSB87Iq5E25KnS6fMYU6eOk= github.com/elazarl/goproxy v0.0.0-20170405201442-c4fc26588b6e/go.mod h1:/Zj4wYkgs4iZTTu3o/KG3Itv/qCCa8VVMlb3i9OVuzc= github.com/emicklei/go-restful v0.0.0-20170410110728-ff4f55a20633/go.mod h1:otzb+WCGbkyDHkqmQmT5YD2WR4BBwUdeQoFo8l/7tVs= github.com/emicklei/go-restful v2.9.5+incompatible/go.mod h1:otzb+WCGbkyDHkqmQmT5YD2WR4BBwUdeQoFo8l/7tVs= @@ -68,7 +83,10 @@ github.com/fatih/color v1.7.0/go.mod h1:Zm6kSWBoL9eyXnKyktHP6abPY2pDugNf5Kwzbycv github.com/fogleman/gg v1.2.1-0.20190220221249-0403632d5b90/go.mod h1:R/bRT+9gY/C5z7JzPU0zXsXHKM4/ayA+zqcVNZzPa1k= github.com/fsnotify/fsnotify v1.4.7 h1:IXs+QLmnXW2CcXuY+8Mzv/fWEsPGWxqefPtCP5CnV9I= github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo= +github.com/fsouza/go-dockerclient v0.0.0-20171004212419-da3951ba2e9e/go.mod h1:KpcjM623fQYE9MZiTGzKhjfxXAV9wbyX2C1cyRHfhl0= +github.com/getsentry/raven-go v0.0.0-20190513200303-c977f96e1095/go.mod h1:KungGk8q33+aIAZUIVWZDr2OfAEBsO49PX4NzFV5kcQ= github.com/ghodss/yaml v0.0.0-20150909031657-73d445a93680/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04= +github.com/ghodss/yaml v1.0.0 h1:wQHKEahhL6wmXdzwWG11gIVCkOv05bNOh+Rxn0yngAk= github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04= github.com/globalsign/mgo v0.0.0-20180905125535-1ca0a4f7cbcb/go.mod h1:xkRDCp4j0OGD1HRkm4kmhM+pmpv3AKq5SU7GMg4oO/Q= github.com/globalsign/mgo v0.0.0-20181015135952-eeefdecb41b8/go.mod h1:xkRDCp4j0OGD1HRkm4kmhM+pmpv3AKq5SU7GMg4oO/Q= @@ -121,6 +139,7 @@ github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/me github.com/gobuffalo/flect v0.1.5 h1:xpKq9ap8MbYfhuPCF0dBH854Gp9CxZjr/IocxELFflo= github.com/gobuffalo/flect v0.1.5/go.mod h1:W3K3X9ksuZfir8f/LrfVtWmCDQFfayuylOJ7sz/Fj80= github.com/gogo/protobuf v1.1.1/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ= +github.com/gogo/protobuf v1.2.1/go.mod h1:hp+jE20tsWTFYpLwKvXlhS1hjn+gTNwPg2I6zVXpSg4= github.com/gogo/protobuf v1.2.2-0.20190723190241-65acae22fc9d h1:3PaI8p3seN09VjbTYC/QWlUZdZ1qS1zGjy7LH2Wt07I= github.com/gogo/protobuf v1.2.2-0.20190723190241-65acae22fc9d/go.mod h1:SlYgWuQ5SjCEi6WLHjHCa1yvBfUnHcTbrrZtXPKa29o= github.com/gogo/protobuf v1.3.1 
h1:DqDEcV5aeaTmdFBePNpYsp3FlcVH/2ISVVM9Qf8PSls= @@ -142,7 +161,14 @@ github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5y github.com/golangplus/bytes v0.0.0-20160111154220-45c989fe5450/go.mod h1:Bk6SMAONeMXrxql8uvOKuAZSu8aM5RUGv+1C6IJaEho= github.com/golangplus/fmt v0.0.0-20150411045040-2a5d6d7d2995/go.mod h1:lJgMEyOkYFkPcDKwRXegd+iM6E7matEszMG5HhwytU8= github.com/golangplus/testing v0.0.0-20180327235837-af21d9c3145e/go.mod h1:0AA//k/eakGydO4jKRoRL2j92ZKSzTgj9tclaCrvXHk= +github.com/gonum/blas v0.0.0-20181208220705-f22b278b28ac/go.mod h1:P32wAyui1PQ58Oce/KYkOqQv8cVw1zAapXOl+dRFGbc= +github.com/gonum/floats v0.0.0-20181209220543-c233463c7e82/go.mod h1:PxC8OnwL11+aosOB5+iEPoV3picfs8tUpkVd0pDo+Kg= +github.com/gonum/graph v0.0.0-20170401004347-50b27dea7ebb/go.mod h1:ye018NnX1zrbOLqwBvs2HqyyTouQgnL8C+qzYk1snPY= +github.com/gonum/internal v0.0.0-20181124074243-f884aa714029/go.mod h1:Pu4dmpkhSyOzRwuXkOgAvijx4o+4YMUJJo9OvPYMkks= +github.com/gonum/lapack v0.0.0-20181123203213-e4cdc5a0bff9/go.mod h1:XA3DeT6rxh2EAE789SSiSJNqxPaC0aE9J8NTOI0Jo/A= +github.com/gonum/matrix v0.0.0-20181209220409-c518dec07be9/go.mod h1:0EXg4mc1CNP0HCqCz+K4ts155PXIlUywf0wqN+GfPZw= github.com/google/btree v0.0.0-20180813153112-4030bb1f1f0c/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= +github.com/google/btree v1.0.0/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M= github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= github.com/google/go-cmp v0.3.1 h1:Xye71clBPdm5HgqGwUkwhbynsUJZhDbS20FvLhQ2izg= @@ -150,6 +176,8 @@ github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMyw github.com/google/gofuzz v0.0.0-20161122191042-44d81051d367/go.mod h1:HP5RmnzzSNb993RKQDq4+1A4ia9nllfqcQFTQJedwGI= github.com/google/gofuzz v1.0.0 h1:A8PeW59pxE9IoFRqBp37U+mSNaQoZ46F1f0f863XSXw= github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= +github.com/google/gofuzz v1.1.0 h1:Hsa8mG0dQ46ij8Sl2AYJDUv1oA9/d6Vk+3LG99Oe02g= +github.com/google/gofuzz v1.1.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= github.com/google/martian v2.1.0+incompatible/go.mod h1:9I4somxYTbIHy5NJKHRl3wXiIaQGbYVAs8BPL6v8lEs= github.com/google/pprof v0.0.0-20181206194817-3ea8567a2e57/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc= github.com/google/uuid v0.0.0-20170306145142-6a5e28554805/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= @@ -162,11 +190,15 @@ github.com/googleapis/gnostic v0.1.0/go.mod h1:sJBsCZ4ayReDTBIg8b9dl28c5xFWyhBTV github.com/googleapis/gnostic v0.3.1 h1:WeAefnSUHlBb0iJKwxFDZdbfGwkd7xRNuV+IpXMJhYk= github.com/googleapis/gnostic v0.3.1/go.mod h1:on+2t9HRStVgn95RSsFWFz+6Q0Snyqv1awfrALZdbtU= github.com/gophercloud/gophercloud v0.1.0/go.mod h1:vxM41WHh5uqHVBMZHzuwNOHh8XEoIEcSTewFxm1c5g8= +github.com/gorilla/mux v0.0.0-20191024121256-f395758b854c/go.mod h1:DVbg23sWSpFRCP0SfiEN6jmj59UnW/n46BH5rLB71So= +github.com/gorilla/websocket v0.0.0-20170926233335-4201258b820c/go.mod h1:E7qHFY5m1UJ88s3WnNqhKjPHQ0heANvMoAMk2YaljkQ= github.com/gorilla/websocket v1.4.0/go.mod h1:E7qHFY5m1UJ88s3WnNqhKjPHQ0heANvMoAMk2YaljkQ= github.com/gregjones/httpcache v0.0.0-20170728041850-787624de3eb7/go.mod h1:FecbI9+v66THATjSRHfNgh1IVFe/9kFxbXtjV0ctIMA= github.com/grpc-ecosystem/go-grpc-middleware v0.0.0-20190222133341-cfaf5686ec79/go.mod h1:FiyG127CGDf3tlThmgyCl78X/SZQqEOJBCDaAfeWzPs= +github.com/grpc-ecosystem/go-grpc-middleware 
v1.0.1-0.20190118093823-f849b5445de4/go.mod h1:FiyG127CGDf3tlThmgyCl78X/SZQqEOJBCDaAfeWzPs= github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0/go.mod h1:8NvIoxWQoOIhqOTXgfV/d3M/q6VIi02HzZEHgUlZvzk= github.com/grpc-ecosystem/grpc-gateway v1.3.0/go.mod h1:RSKVYQBd5MCa4OVpNdGskqpgL2+G+NZTnrVHpWWfpdw= +github.com/grpc-ecosystem/grpc-gateway v1.9.5/go.mod h1:vNeuVxBJEsws4ogUvrchl83t/GYV9WGTSLVdBhOQFDY= github.com/hashicorp/golang-lru v0.5.0/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= github.com/hashicorp/golang-lru v0.5.1/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= github.com/hashicorp/golang-lru v0.5.3 h1:YPkqC67at8FYaadspW/6uE0COsBxS2656RLEr8Bppgk= @@ -186,8 +218,10 @@ github.com/json-iterator/go v1.1.6/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCV github.com/json-iterator/go v1.1.7 h1:KfgG9LzI+pYjr4xvmz/5H4FXjokeP+rlHLhv3iH62Fo= github.com/json-iterator/go v1.1.7/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= github.com/jstemmer/go-junit-report v0.0.0-20190106144839-af01ea7f8024/go.mod h1:6v2b51hI/fHJwM22ozAgKL4VKDeJcHhJFhtBdhmNjmU= +github.com/jteeuwen/go-bindata v3.0.8-0.20151023091102-a0ff2567cfb7+incompatible/go.mod h1:JVvhzYOiGBnFSYRyV00iY8q7/0PThjIYav1p9h5dmKs= github.com/julienschmidt/httprouter v1.2.0/go.mod h1:SYymIcj16QtmaHHD7aYtjjsJG7VTCxuUUipMqKk8s4w= github.com/jung-kurt/gofpdf v1.0.3-0.20190309125859-24315acbbda5/go.mod h1:7Id9E/uU8ce6rXgefFLlgrJj/GYY22cpxn+r32jIOes= +github.com/kisielk/errcheck v1.1.0/go.mod h1:EZBBE59ingxPouuu3KfxchcWSUPOHkagtvWXihfKN4Q= github.com/kisielk/errcheck v1.2.0/go.mod h1:/BMXB+zMLi60iA8Vv6Ksmxu/1UDYcXs4uQLJ+jE2L00= github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck= github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= @@ -198,6 +232,7 @@ github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= github.com/kr/pty v1.1.5/go.mod h1:9r2w37qlBe7rQ6e1fg1S/9xpWHSnaqNdHD3WcMdbPDA= github.com/kr/text v0.1.0 h1:45sCR5RtlFHMR4UwH9sdQ5TC8v0qDQCHnXt+kaKSTVE= github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI= +github.com/kubernetes-sigs/kube-storage-version-migrator v0.0.0-20191127225502-51849bc15f17/go.mod h1:enH0BVV+4+DAgWdwSlMefG8bBzTfVMTr1lApzdLZ/cc= github.com/liggitt/tabwriter v0.0.0-20181228230101-89fcab3d43de/go.mod h1:zAbeS9B/r2mtpb6U+EI2rYA5OAXxsYw6wTamcNW+zcE= github.com/lithammer/dedent v1.1.0/go.mod h1:jrXYCQtgg0nJiN+StA2KgR7w6CiQNv9Fd/Z9BP0jIOc= github.com/magiconair/properties v1.8.0/go.mod h1:PppfXfuXeibc/6YijjN8zIbojt8czPbwD3XqdrwzmxQ= @@ -207,10 +242,13 @@ github.com/mailru/easyjson v0.0.0-20190312143242-1de009706dbe/go.mod h1:C1wdFJiN github.com/mailru/easyjson v0.0.0-20190614124828-94de47d64c63/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc= github.com/mailru/easyjson v0.0.0-20190626092158-b2ccc519800e/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc= github.com/mailru/easyjson v0.7.0/go.mod h1:KAzv3t3aY1NaHWoQz1+4F1ccyAH66Jk7yos7ldAVICs= +github.com/mattn/go-colorable v0.0.9/go.mod h1:9vuHe8Xs5qXnSaW/c/ABM9alt+Vo+STaOChaDxuIBZU= github.com/mattn/go-colorable v0.1.2 h1:/bC9yWikZXAL9uJdulbSfyVNIR3n3trXl+v8+1sx8mU= github.com/mattn/go-colorable v0.1.2/go.mod h1:U0ppj6V5qS13XJ6of8GYAs25YV2eR4EVcfRqFIhoBtE= +github.com/mattn/go-isatty v0.0.4/go.mod h1:M+lRXTBqGeGNdLjl/ufCoiOlB5xdOkqRJdNxMWT7Zi4= github.com/mattn/go-isatty v0.0.8 h1:HLtExJ+uU2HOZ+wI0Tt5DtUDrx8yhUqDcp7fYERX4CE= github.com/mattn/go-isatty v0.0.8/go.mod 
h1:Iq45c/XA43vh69/j3iqttzPXn0bhXyGjM0Hdxcsrc5s= +github.com/mattn/go-runewidth v0.0.2/go.mod h1:LwmH8dsx7+W8Uxz3IHJYH5QSwggIsqBzpuz5H//U1FU= github.com/matttproud/golang_protobuf_extensions v1.0.1 h1:4hp9jkHxhMHkqkrB3Ix0jegS5sx/RkqARlsWZ6pIwiU= github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0= github.com/mitchellh/go-homedir v1.1.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0= @@ -223,9 +261,12 @@ github.com/modern-go/reflect2 v0.0.0-20180320133207-05fbef0ca5da/go.mod h1:bx2lN github.com/modern-go/reflect2 v0.0.0-20180701023420-4b7aa43c6742/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0= github.com/modern-go/reflect2 v1.0.1 h1:9f412s+6RmYXLWZSEzVVgPGK7C2PphHj5RJrvfx9AWI= github.com/modern-go/reflect2 v1.0.1/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0= +github.com/morikuni/aec v1.0.0/go.mod h1:BbKIizmSmc5MMPqRYbxO4ZU0S0+P200+tUnFx7PXmsc= github.com/munnerz/goautoneg v0.0.0-20120707110453-a547fc61f48d/go.mod h1:+n7T8mK8HuQTcFwEeznm/DIxMOiR9yIdICNftLE1DvQ= +github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822/go.mod h1:+n7T8mK8HuQTcFwEeznm/DIxMOiR9yIdICNftLE1DvQ= github.com/mwitkow/go-conntrack v0.0.0-20161129095857-cc309e4a2223/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U= github.com/mxk/go-flowrate v0.0.0-20140419014527-cca7078d478f/go.mod h1:ZdcZmHo+o7JKHSa8/e818NopupXU1YMK5fe1lsApnBw= +github.com/olekukonko/tablewriter v0.0.0-20170122224234-a0225b3f23b5/go.mod h1:vsDQFd/mU46D+Z4whnwzcISnGGzXWMclvtLoiIKAKIo= github.com/onsi/ginkgo v0.0.0-20170829012221-11459a886d9c/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= github.com/onsi/ginkgo v1.6.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= github.com/onsi/ginkgo v1.8.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= @@ -236,12 +277,13 @@ github.com/onsi/gomega v1.5.0/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1Cpa github.com/onsi/gomega v1.7.0 h1:XPnZz8VVBHjVsy1vzJmRwIcSwiUO+JFfrv/xGiigmME= github.com/onsi/gomega v1.7.0/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY= github.com/opencontainers/go-digest v1.0.0-rc1/go.mod h1:cMLVZDEM3+U2I4VmLI6N8jQYUd2OVphdqWwCJHrFt2s= -github.com/openshift/api v0.0.0-20200127192224-ffde1bfabb9f h1:cMwQHcNGVg4XusMZxmdGqjp2kQZXeJo32MpF9nq26Yo= -github.com/openshift/api v0.0.0-20200127192224-ffde1bfabb9f/go.mod h1:fT6U/JfG8uZzemTRwZA2kBDJP5nWz7v05UHnty/D+pk= -github.com/openshift/client-go v0.0.0-20190923180330-3b6373338c9b h1:E++qQ7W1/EdvuMo+YGVbMPn4HihEp7YT5Rghh0VmA9A= -github.com/openshift/client-go v0.0.0-20190923180330-3b6373338c9b/go.mod h1:6rzn+JTr7+WYS2E1TExP4gByoABxMznR6y2SnUIkmxk= -github.com/openshift/cluster-version-operator v3.11.1-0.20190629164025-08cac1c02538+incompatible h1:aLjroKDBGQ7oW2R8876TsB1nKIl+loREmjEVm0aqTno= -github.com/openshift/cluster-version-operator v3.11.1-0.20190629164025-08cac1c02538+incompatible/go.mod h1:0BbpR1mrN0F2ZRae5N1XHcytmkvVPaeKgSQwRRBWugc= +github.com/opencontainers/image-spec v1.0.1/go.mod h1:BtxoFyWECRxE4U/7sNtV5W15zMzWCbyJoFRP3s7yZA0= +github.com/opencontainers/runc v0.0.0-20191031171055-b133feaeeb2e/go.mod h1:qT5XzbpPznkRYVz/mWwUaVBUv2rmF59PVA73FjuZG0U= +github.com/openshift/api v0.0.0-20200618202633-7192180f496a h1:XgIAqRY7Ve0Vfl0lZHXHqGBFG2zKjvFIxlxGYL6SayE= +github.com/openshift/api v0.0.0-20200618202633-7192180f496a/go.mod h1:fT6U/JfG8uZzemTRwZA2kBDJP5nWz7v05UHnty/D+pk= +github.com/openshift/build-machinery-go v0.0.0-20200211121458-5e3d6e570160/go.mod h1:1CkcsT3aVebzRBzVTSbiKSkJMsC/CASqxesfqEMfJEc= 
+github.com/openshift/client-go v0.0.0-20200116152001-92a2713fa240 h1:XYfJWv2Ch+qInGLDEedHRtDsJwnxyU1L8U7SY56NcA8= +github.com/openshift/client-go v0.0.0-20200116152001-92a2713fa240/go.mod h1:4riOwdj99Hd/q+iAcJZfNCsQQQMwURnZV6RL4WHYS5w= github.com/openshift/kubernetes-apiextensions-apiserver v0.0.0-20190918161926-8f644eb6e783 h1:Mek6OZC4QulQv6YQdwlHinEcsBGfynqh3dMXlBAv0h0= github.com/openshift/kubernetes-apiextensions-apiserver v0.0.0-20190918161926-8f644eb6e783/go.mod h1:xvae1SZB3E17UpV59AWc271W/Ph25N+bjPyR63X6tPY= github.com/openshift/kubernetes-apimachinery v0.0.0-20190913080033-27d36303b655 h1:ZVUfsf5Si43TsXabIU8ugpWDk9uzn4vqOUk4AP2q+Aw= @@ -250,6 +292,8 @@ github.com/openshift/kubernetes-client-go v0.0.0-20190918160344-1fbdaa4c8d90 h1: github.com/openshift/kubernetes-client-go v0.0.0-20190918160344-1fbdaa4c8d90/go.mod h1:J69/JveO6XESwVgG53q3Uz5OSfgsv4uxpScmmyYOOlk= github.com/openshift/kubernetes-kube-aggregator v0.0.0-20190918161219-8c8f079fddc3 h1:Fs6hTSgMPCLZcy0wFKcCFk/YDYaHa+eMXRak4oxuNBQ= github.com/openshift/kubernetes-kube-aggregator v0.0.0-20190918161219-8c8f079fddc3/go.mod h1:NJisPUqwlg1A99RhO1BTnNtwC4pKUyXJ2f3Xc4PxKQg= +github.com/openshift/library-go v0.0.0-20200921144613-67f7770bf823 h1:v1Q8DQSkJhxQYA356jV5QSO9Jd91Oy7olY8Ds4jQZZE= +github.com/openshift/library-go v0.0.0-20200921144613-67f7770bf823/go.mod h1:x072xQzqjl4BYII7ytMIh8fSgELnxBGqgUiGWYkZ8CI= github.com/operator-framework/operator-sdk v0.5.1-0.20190301204940-c2efe6f74e7b h1:Q1q8w51pAZdx6LEkaYdSbUaaEOHXTyTXLhtGgIiKaiA= github.com/operator-framework/operator-sdk v0.5.1-0.20190301204940-c2efe6f74e7b/go.mod h1:iVyukRkam5JZa8AnjYf+/G3rk7JI1+M6GsU0sq0B9NA= github.com/pborman/uuid v1.2.0/go.mod h1:X/NO0urCmaxf9VXbdlT7C2Yzkj2IKimNn4k+gtPdI/k= @@ -258,6 +302,7 @@ github.com/peterbourgon/diskv v2.0.1+incompatible/go.mod h1:uqqh8zWWbv1HBMNONnaR github.com/pkg/errors v0.8.0/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= github.com/pkg/errors v0.8.1 h1:iURUrRGxPUNPdy5/HRSm+Yj6okJ6UtLINN0Q9M4+h3I= github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= +github.com/pkg/profile v1.3.0/go.mod h1:hJw3o1OdXxsrSjjVksARp5W95eeEaEfptyVZyv6JUPA= github.com/pmezard/go-difflib v0.0.0-20151028094244-d8ed2627bdf0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= @@ -279,17 +324,21 @@ github.com/prometheus/procfs v0.0.2/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsT github.com/prometheus/procfs v0.0.5 h1:3+auTFlqw+ZaQYJARz6ArODtkaIwtvBTx3N2NehQlL8= github.com/prometheus/procfs v0.0.5/go.mod h1:4A/X28fw3Fc593LaREMrKMqOKvUAntwMDaekg4FpcdQ= github.com/remyoudompheng/bigfft v0.0.0-20170806203942-52369c62f446/go.mod h1:uYEyJGbgTkfkS4+E/PavXkNJcbFIpEtjt2B0KDQ5+9M= +github.com/rogpeppe/fastuuid v0.0.0-20150106093220-6724a57986af/go.mod h1:XWv6SoW27p1b0cqNHllgS5HIMJraePCO15w5zCzIWYg= github.com/russross/blackfriday v1.5.2/go.mod h1:JO/DiYxRf+HjHt06OyowR9PTA263kcR/rfWxYHBV53g= github.com/sirupsen/logrus v1.2.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo= github.com/sirupsen/logrus v1.4.2/go.mod h1:tLMulIdttU9McNUspp0xgXVQah82FyeX6MwdIuYE2rE= github.com/soheilhy/cmux v0.1.3/go.mod h1:IM3LyeVVIOuxMH7sFAkER9+bJ4dT7Ms6E4xg4kGIyLM= +github.com/soheilhy/cmux v0.1.4/go.mod h1:IM3LyeVVIOuxMH7sFAkER9+bJ4dT7Ms6E4xg4kGIyLM= github.com/spf13/afero v1.1.2/go.mod h1:j4pytiNVoe2o6bmDsKpLACNPDBIoEAkihy7loJ1B0CQ= github.com/spf13/afero 
v1.2.2/go.mod h1:9ZxEEn6pIJ8Rxe320qSDBk6AsU0r9pR7Q4OcevTdifk= github.com/spf13/cast v1.3.0/go.mod h1:Qx5cxh0v+4UWYiBimWS+eyWzqEqokIECu5etghLkUJE= +github.com/spf13/cobra v0.0.3/go.mod h1:1l0Ry5zgKvJasoi3XT1TypsSe7PqH0Sj9dhYf7v3XqQ= github.com/spf13/cobra v0.0.5 h1:f0B+LkLX6DtmRH1isoNA9VTtNUK9K8xYd28JNNfOv/s= github.com/spf13/cobra v0.0.5/go.mod h1:3K3wKZymM7VvHMDS9+Akkh4K60UwM26emMESw8tLCHU= github.com/spf13/jwalterweatherman v1.0.0/go.mod h1:cQK4TGJAtQXfYWX+Ddv3mKDzgVb68N+wFjFa4jdeBTo= github.com/spf13/pflag v0.0.0-20170130214245-9ff6c6923cff/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4= +github.com/spf13/pflag v1.0.1/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4= github.com/spf13/pflag v1.0.3/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4= github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA= github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg= @@ -304,12 +353,18 @@ github.com/stretchr/testify v1.4.0 h1:2E4SXV/wtOkTonXsotYi4li6zVWxYlZuYNCXe9XRJy github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4= github.com/tmc/grpc-websocket-proxy v0.0.0-20170815181823-89b8d40f7ca8/go.mod h1:ncp9v5uamzpCO7NfCPTXjqaC+bZgJeR0sMTm6dMHP7U= github.com/ugorji/go/codec v0.0.0-20181204163529-d75b2dcb6bc8/go.mod h1:VFNgLljTbGfSG7qAOspJ7OScBnGdDN/yBr0sguwnwf0= +github.com/urfave/cli v1.20.0/go.mod h1:70zkFmudgCuE/ngEzBv17Jvp/497gISqfk5gWijbERA= +github.com/vishvananda/netlink v1.0.0/go.mod h1:+SR5DhBJrl6ZM7CoCKvpw5BKroDKQ+PJqOg65H/2ktk= +github.com/vishvananda/netns v0.0.0-20191106174202-0a2b9b5464df/go.mod h1:JP3t17pCcGlemwknint6hfoeCVQrEMVwxRLRjXpq+BU= github.com/vmware/govmomi v0.21.0 h1:jc8uMuxpcV2xMAA/cnEDlnsIjvqcMra5Y8onh/U3VuY= github.com/vmware/govmomi v0.21.0/go.mod h1:zbnFoBQ9GIjs2RVETy8CNEpb+L+Lwkjs3XZUL0B3/m0= github.com/vmware/vmw-guestinfo v0.0.0-20170707015358-25eff159a728/go.mod h1:x9oS4Wk2s2u4tS29nEaDLdzvuHdB19CvSGJjPgkZJNk= github.com/xiang90/probing v0.0.0-20160813154853-07dd2e8dfe18/go.mod h1:UETIi67q53MR2AWcXfiuqkDkRtnGDLqkBTpCHuJHxtU= +github.com/xiang90/probing v0.0.0-20190116061207-43a291ad63a2/go.mod h1:UETIi67q53MR2AWcXfiuqkDkRtnGDLqkBTpCHuJHxtU= github.com/xlab/handysort v0.0.0-20150421192137-fb3537ed64a1/go.mod h1:QcJo0QPSfTONNIgpN5RA8prR7fF8nkF6cTWTcNerRO8= github.com/xordataexchange/crypt v0.0.3-0.20170626215501-b2862e3d0a77/go.mod h1:aYKd//L2LvnjZzWKhF00oedf4jCCReLcmhLdhm1A27Q= +go.etcd.io/bbolt v1.3.3/go.mod h1:IbVyRI1SCnLcuJnV2u8VeU0CEYM7e686BmAb1XKL+uU= +go.etcd.io/etcd v0.0.0-20191023171146-3cf2f69b5738/go.mod h1:dnLIgRNXwCJa5e+c6mIZCrds/GIG4ncV9HhK5PX7jPg= go.opencensus.io v0.21.0/go.mod h1:mSImk1erAIZhrmZN+AvHh14ztQfjbGwt4TtuofqLduU= go.uber.org/atomic v0.0.0-20181018215023-8dc6146f7569/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE= go.uber.org/atomic v1.3.2/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE= @@ -329,6 +384,7 @@ golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACk golang.org/x/crypto v0.0.0-20190320223903-b7391e95e576/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= golang.org/x/crypto v0.0.0-20190605123033-f99c8df09eb5/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20190611184440-5c40567a22f8/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= +golang.org/x/crypto v0.0.0-20190820162420-60c769a6c586/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550 h1:ObdrDkeb4kJdCP557AjRjq69pTHfNouLtWZG7j9rPN8= 
golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/exp v0.0.0-20180321215751-8460e604b9de/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= @@ -352,6 +408,7 @@ golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73r golang.org/x/net v0.0.0-20181005035420-146acd28ed58/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20181114220301-adae6a3d119a/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20181201002055-351d144fa1fc/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20181220203305-927f97764cc3/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20190108225652-1e06a53dbb7e/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20190213061140-3a22650c66bd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= @@ -361,9 +418,13 @@ golang.org/x/net v0.0.0-20190603091049-60506f45cf65/go.mod h1:HSz+uSET+XFnRR8LxR golang.org/x/net v0.0.0-20190613194153-d28f0bde5980/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20190812203447-cdfb69ac37fc/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20190813141303-74dc4d7220e7/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20190827160401-ba9fcec4b297/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20191004110552-13f9640d40b9/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20191209160850-c0dbc17a3553 h1:efeOvDhwQ29Dj3SdAV/MJf8oukgn+8D8WgaCaRMchF8= golang.org/x/net v0.0.0-20191209160850-c0dbc17a3553/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20200202094626-16171245cfb2 h1:CCH4IOTTfewWjGOlSp+zGcjutRKlBEZQ6wTn8ozI/nI= +golang.org/x/net v0.0.0-20200202094626-16171245cfb2/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45 h1:SVwTIAaPC2U/AvvLNZ2a7OVsmBpC8L5BlwK1whH3hm0= @@ -377,6 +438,7 @@ golang.org/x/sys v0.0.0-20170830134202-bb24a47a89ea/go.mod h1:STP8DvDyc/dI5b8T5h golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20180909124046-d0be0721c37e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20181107165924-66b7b1311ac8/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20181116152217-5ac8a444bdc5/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20181205085412-a5c9d58dba9a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190209173611-3b5209105503/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= @@ -396,9 +458,11 @@ golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text 
v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.2 h1:tW2bmiBqwgJj/UpqtC8EpXEZVYOwU0yG4iWbprSVAcs= golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk= +golang.org/x/time v0.0.0-20180412165947-fbb02b2291d2/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20190308202827-9d24e82272b4 h1:SvFZT6jyqRaOeXpc5h/JSfZenJ2O330aBsf7JfSUXmQ= golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= +golang.org/x/tools v0.0.0-20180221164845-07fd8470d635/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20180525024113-a5b4c53f6e8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20181011042414-1f849cf54d09/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= @@ -440,11 +504,14 @@ google.golang.org/genproto v0.0.0-20190418145605-e7d98fc518a7/go.mod h1:VzzqZJRn google.golang.org/genproto v0.0.0-20190502173448-54afdca5d873/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= google.golang.org/grpc v1.23.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg= +google.golang.org/grpc v1.23.1/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg= gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw= +gopkg.in/asn1-ber.v1 v1.0.0-20181015200546-f715ec2f112d/go.mod h1:cuepJuh7vyXfUyUwEgHQXw849cJrilpS5NeIjOWESAw= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15 h1:YR8cESwS4TdDjEe65xsg0ogRM/Nc3DYOhEAlW+xobZo= gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/cheggaaa/pb.v1 v1.0.25/go.mod h1:V/YB90LKu/1FcN3WVnfiiE5oMCibMjukxqG/qStrOgw= gopkg.in/fsnotify.v1 v1.4.7 h1:xOHLXZwVvI9hhs+cLKq5+I5onOuwQLhQwiu63xxlHs4= gopkg.in/fsnotify.v1 v1.4.7/go.mod h1:Tz8NjZHkW78fSQdbUxIjBTcgA1z1m8ZHf0WmKUhAMys= gopkg.in/gcfg.v1 v1.2.3 h1:m8OOJ4ccYHnx2f4gQwpno8nAX5OGOh7RLaaz0pj3Ogs= @@ -452,14 +519,18 @@ gopkg.in/gcfg.v1 v1.2.3/go.mod h1:yesOnuUOFQAhST5vPY4nbZsb/huCgGGXlipJsBn0b3o= gopkg.in/inf.v0 v0.9.0/go.mod h1:cWUDdTG/fYaXco+Dcufb5Vnc6Gp2YChqWtbxRZE0mXw= gopkg.in/inf.v0 v0.9.1 h1:73M5CoZyi3ZLMOyDlQh031Cx6N9NDJ2Vvfl76EDAgDc= gopkg.in/inf.v0 v0.9.1/go.mod h1:cWUDdTG/fYaXco+Dcufb5Vnc6Gp2YChqWtbxRZE0mXw= +gopkg.in/ldap.v2 v2.5.1/go.mod h1:oI0cpe/D7HRtBQl8aTg+ZmzFUAvu4lsv3eLXMLGFxWk= gopkg.in/natefinch/lumberjack.v2 v2.0.0/go.mod h1:l0ndWWf7gzL7RNwBG7wST/UCcT4T24xpD6X8LsfU/+k= +gopkg.in/resty.v1 v1.12.0/go.mod h1:mDo4pnntr5jdWRML875a/NmxYqAlA73dVijT2AXvQQo= gopkg.in/square/go-jose.v2 v2.2.2/go.mod h1:M9dMgbHiYLoDGQrXy7OpJDJWiKiU//h+vD76mk0e1AI= gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7 h1:uRGJdciOHaEIrze2W8Q3AKkepLTh2hOroT7a+7czfdQ= gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7/go.mod h1:dt/ZhP58zS4L8KSrWDmTeBkI65Dw0HsyUHuEVlX15mw= gopkg.in/warnings.v0 v0.1.2 h1:wFXVbFY8DY5/xOe1ECiWdKCzZlxgshcYVNkBHstARME= gopkg.in/warnings.v0 v0.1.2/go.mod 
h1:jksf8JmL6Qr/oQM2OXTHunEvvTAsrWBLb6OOjuVWRNI= +gopkg.in/yaml.v2 v2.0.0-20170812160011-eb3733d160e7/go.mod h1:JAlM8MvJe8wmxCU4Bli9HhUf9+ttbYbLASfIpnQbh74= gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.2.4/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.2.8 h1:obN1ZagJSUGI0Ek/LBmuj4SNLPfIny3KsKFopxRdj10= gopkg.in/yaml.v2 v2.2.8/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v3 v3.0.0-20190905181640-827449938966 h1:B0J02caTR6tpSJozBJyiAzT6CtBzjclw4pgm9gg8Ys0= @@ -471,11 +542,13 @@ honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWh k8s.io/api v0.0.0-20190918155943-95b840bb6a1f h1:8FRUST8oUkEI45WYKyD8ed7Ad0Kg5v11zHyPkEVb2xo= k8s.io/api v0.0.0-20190918155943-95b840bb6a1f/go.mod h1:uWuOHnjmNrtQomJrvEBg0c0HRNyQ+8KTEERVsK0PW48= k8s.io/apiserver v0.0.0-20190918160949-bfa5e2e684ad/go.mod h1:XPCXEwhjaFN29a8NldXA901ElnKeKLrLtREO9ZhFyhg= +k8s.io/apiserver v0.17.1/go.mod h1:BQEUObJv8H6ZYO7DeKI5vb50tjk6paRJ4ZhSyJsiSco= k8s.io/cli-runtime v0.0.0-20200124034138-fea5ca65697c/go.mod h1:BiNOUI2kOIZEjd29Xk2kIhHbTDwhoL/jTGf2srqUlsY= k8s.io/code-generator v0.0.0-20190912054826-cd179ad6a269 h1:d8Fm55A+7HOczX58+x9x+nJnJ1Devt1aCrWVIPaw/Vg= k8s.io/code-generator v0.0.0-20190912054826-cd179ad6a269/go.mod h1:V5BD6M4CyaN5m+VthcclXWsVcT1Hu+glwa1bi3MIsyE= k8s.io/component-base v0.0.0-20190918160511-547f6c5d7090/go.mod h1:933PBGtQFJky3TEwYx4aEPZ4IxqhWh3R6DCmzqIn1hA= k8s.io/component-base v0.0.0-20200124032913-0919167b1313/go.mod h1:U8+NYX6bzTEKUJ2pwPBi/a83Z7IS8gFoaa+DESUU6as= +k8s.io/component-base v0.17.1/go.mod h1:LrBPZkXtlvGjBzDJa0+b7E5Ij4VoAAKrOGudRC5z2eY= k8s.io/gengo v0.0.0-20190128074634-0689ccc1d7d6/go.mod h1:ezvh/TsK7cY6rbqRK0oQQ8IAqLxYwwyPxAX1Pzy0ii0= k8s.io/gengo v0.0.0-20190822140433-26a664648505 h1:ZY6yclUKVbZ+SdWnkfY+Je5vrMpKOxmGeKRbsXVmqYM= k8s.io/gengo v0.0.0-20190822140433-26a664648505/go.mod h1:ezvh/TsK7cY6rbqRK0oQQ8IAqLxYwwyPxAX1Pzy0ii0= @@ -486,12 +559,14 @@ k8s.io/klog v1.0.0 h1:Pt+yjF5aB1xDSVbau4VsWe+dQNzA0qv1LlXdC2dF6Q8= k8s.io/klog v1.0.0/go.mod h1:4Bi6QPql/J/LkTDqv7R/cd3hPo4k2DG6Ptcz060Ez5I= k8s.io/kube-openapi v0.0.0-20190816220812-743ec37842bf h1:EYm5AW/UUDbnmnI+gK0TJDVK9qPLhM+sRHYanNKw0EQ= k8s.io/kube-openapi v0.0.0-20190816220812-743ec37842bf/go.mod h1:1TqjTSzOxsLGIKfj0lK8EeCP7K1iUG65v09OM0/WG5E= +k8s.io/kube-openapi v0.0.0-20191107075043-30be4d16710a/go.mod h1:1TqjTSzOxsLGIKfj0lK8EeCP7K1iUG65v09OM0/WG5E= k8s.io/kube-openapi v0.0.0-20200121204235-bf4fb3bd569c h1:/KUFqjjqAcY4Us6luF5RDNZ16KJtb49HfR3ZHB9qYXM= k8s.io/kube-openapi v0.0.0-20200121204235-bf4fb3bd569c/go.mod h1:GRQhZsXIAJ1xR0C9bd8UpWHZ5plfAS9fzPjJuQ6JL3E= k8s.io/kubectl v0.0.0-20200124035537-9f7d91504e51 h1:edZAu3jjcBCfPsiBFgCm9KstOkaXLgzpxW3MxMrmqJ0= k8s.io/kubectl v0.0.0-20200124035537-9f7d91504e51/go.mod h1:SdAdIAhot+OAzP9LVgZcYV5fSNndnJZnu7Xui9xvZmE= k8s.io/metrics v0.0.0-20200124034024-3e2f50f1d54a/go.mod h1:g2plynn0AY5rsm5f8CasNVbOsNE2grZFt7YAdhKGYn4= k8s.io/utils v0.0.0-20190801114015-581e00157fb1/go.mod h1:sZAwmy6armz5eXlNoLmJcl4F1QuKu7sr+mFQ0byX7Ew= +k8s.io/utils v0.0.0-20191114184206-e782cd3c129f/go.mod h1:sZAwmy6armz5eXlNoLmJcl4F1QuKu7sr+mFQ0byX7Ew= k8s.io/utils v0.0.0-20191217005138-9e5e9d854fcc h1:MUttqhwRgupMiA5ps5F3d2/NLkU8EZSECTGxrQxqM54= k8s.io/utils v0.0.0-20191217005138-9e5e9d854fcc/go.mod h1:sZAwmy6armz5eXlNoLmJcl4F1QuKu7sr+mFQ0byX7Ew= modernc.org/cc v1.0.0/go.mod 
h1:1Sk4//wdnYJiUIxnW8ddKpaOJCF37yAdqYnkxUpaYxw=
@@ -507,6 +582,7 @@ sigs.k8s.io/controller-tools v0.2.2-0.20190919191502-76a25b63325a/go.mod h1:8SNG
 sigs.k8s.io/kustomize v2.0.3+incompatible/go.mod h1:MkjgH3RdOWrievjo6c9T245dYlB5QeXV4WCbnt/PEpU=
 sigs.k8s.io/structured-merge-diff v0.0.0-20190525122527-15d366b2352e/go.mod h1:wWxsB5ozmmv/SG7nM11ayaAW51xMvak/t1r0CSlcokI=
 sigs.k8s.io/structured-merge-diff v0.0.0-20190817042607-6149e4549fca/go.mod h1:IIgPezJWb76P0hotTxzDbWsMYB8APh18qZnxkomBpxA=
+sigs.k8s.io/structured-merge-diff v1.0.1-0.20191108220359-b1b620dd3f06/go.mod h1:/ULNhyfzRopfcjskuui0cTITekDduZ7ycKN3oUT9R18=
 sigs.k8s.io/structured-merge-diff/v3 v3.0.0-20200116222232-67a7b8c61874/go.mod h1:PlARxl6Hbt/+BC80dRLi1qAmnMqwqDg62YvvVkZjemw=
 sigs.k8s.io/testing_frameworks v0.1.1/go.mod h1:VVBKrHmJ6Ekkfz284YKhQePcdycOzNH9qL6ht1zEr/U=
 sigs.k8s.io/testing_frameworks v0.1.2-0.20190130140139-57f07443c2d4 h1:GtDhkj3cF4A4IW+A9LScsuxvJqA9DE7G7PGH1f8B07U=
@@ -514,3 +590,4 @@ sigs.k8s.io/testing_frameworks v0.1.2-0.20190130140139-57f07443c2d4/go.mod h1:VV
 sigs.k8s.io/yaml v1.1.0 h1:4A07+ZFc2wgJwo8YNlQpr1rVlgUDlxXHhPJciaPY5gs=
 sigs.k8s.io/yaml v1.1.0/go.mod h1:UJmg0vDUVViEyp3mgSv9WPwZCDxu4rQW1olrI1uml+o=
 vbom.ml/util v0.0.0-20160121211510-db5cfe13f5cc/go.mod h1:so/NYdZXCz+E3ZpW0uAoCj6uzU2+8OWDFv/HxUSs7kI=
+vbom.ml/util v0.0.0-20180919145318-efcd4e0f9787/go.mod h1:so/NYdZXCz+E3ZpW0uAoCj6uzU2+8OWDFv/HxUSs7kI=
diff --git a/pkg/operator/status.go b/pkg/operator/status.go
index f25a203d43df6c2b1bac0da1502c914e80229737..39706425e1694e4b26f032236c8cc145051e3106 100644
--- a/pkg/operator/status.go
+++ b/pkg/operator/status.go
@@ -5,12 +5,11 @@ import (
 	"reflect"
 	"strings"
 
-	"k8s.io/apimachinery/pkg/api/equality"
-
 	"github.com/golang/glog"
 	osconfigv1 "github.com/openshift/api/config/v1"
-	cvoresourcemerge "github.com/openshift/cluster-version-operator/lib/resourcemerge"
+	"github.com/openshift/library-go/pkg/config/clusteroperator/v1helpers"
 	v1 "k8s.io/api/core/v1"
+	"k8s.io/apimachinery/pkg/api/equality"
 	"k8s.io/apimachinery/pkg/api/errors"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 )
@@ -152,7 +151,7 @@ func newClusterOperatorStatusCondition(conditionType osconfigv1.ClusterStatusCon
 //syncStatus applies the new condition to the mao ClusterOperator object.
 func (optr *Operator) syncStatus(co *osconfigv1.ClusterOperator, conds []osconfigv1.ClusterOperatorStatusCondition) error {
 	for _, c := range conds {
-		cvoresourcemerge.SetOperatorStatusCondition(&co.Status.Conditions, c)
+		v1helpers.SetStatusCondition(&co.Status.Conditions, c)
 	}
 
 	_, err := optr.osClient.ConfigV1().ClusterOperators().UpdateStatus(co)
diff --git a/pkg/operator/status_test.go b/pkg/operator/status_test.go
index 7e09b35c22530825e136329d26013cd7813b2626..83abcfc121a6910ba2a0b7d25ce772f5735e3bf7 100644
--- a/pkg/operator/status_test.go
+++ b/pkg/operator/status_test.go
@@ -7,7 +7,7 @@ import (
 
 	osconfigv1 "github.com/openshift/api/config/v1"
 	fakeconfigclientset "github.com/openshift/client-go/config/clientset/versioned/fake"
-	cvoresourcemerge "github.com/openshift/cluster-version-operator/lib/resourcemerge"
+	"github.com/openshift/library-go/pkg/config/clusteroperator/v1helpers"
 	"github.com/stretchr/testify/assert"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 	"k8s.io/client-go/tools/record"
@@ -103,13 +103,13 @@ func TestOperatorStatusProgressing(t *testing.T) {
 			}
 
 			for _, expectedCondition := range tc.expectedConditions {
-				ok := cvoresourcemerge.IsOperatorStatusConditionPresentAndEqual(
+				ok := v1helpers.IsStatusConditionPresentAndEqual(
 					gotCO.Status.Conditions, expectedCondition.Type, expectedCondition.Status,
 				)
 				if !ok {
 					t.Errorf("wrong status for condition. Expected: %v, got: %v", expectedCondition,
-						cvoresourcemerge.FindOperatorStatusCondition(gotCO.Status.Conditions, expectedCondition.Type))
+						v1helpers.FindStatusCondition(gotCO.Status.Conditions, expectedCondition.Type))
 				}
 			}
@@ -125,13 +125,13 @@ func TestOperatorStatusProgressing(t *testing.T) {
 			assert.True(t, condition.LastTransitionTime.Equal(&conditionAfterAnotherSync.LastTransitionTime), "test-case %v expected LastTransitionTime not to be updated if condition state is same", i)
 
 			for _, expectedCondition := range tc.expectedConditions {
-				ok := cvoresourcemerge.IsOperatorStatusConditionPresentAndEqual(
+				ok := v1helpers.IsStatusConditionPresentAndEqual(
 					gotCO.Status.Conditions, expectedCondition.Type, expectedCondition.Status,
 				)
 				if !ok {
 					t.Errorf("wrong status for condition. Expected: %v, got: %v", expectedCondition,
-						cvoresourcemerge.FindOperatorStatusCondition(gotCO.Status.Conditions, expectedCondition.Type))
+						v1helpers.FindStatusCondition(gotCO.Status.Conditions, expectedCondition.Type))
 				}
 			}
 		}
diff --git a/pkg/operator/sync.go b/pkg/operator/sync.go
index 2ac81c8fa8d6a857b1b0d455776918640b430826..f1442e95869fc04a92af508ea419762ed8424164 100644
--- a/pkg/operator/sync.go
+++ b/pkg/operator/sync.go
@@ -5,6 +5,8 @@ import (
 	"time"
 
 	"github.com/golang/glog"
+	"github.com/openshift/library-go/pkg/operator/events"
+	"github.com/openshift/library-go/pkg/operator/resource/resourceapply"
 	appsv1 "k8s.io/api/apps/v1"
 	corev1 "k8s.io/api/core/v1"
 	apierrors "k8s.io/apimachinery/pkg/api/errors"
@@ -12,8 +14,6 @@ import (
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 	"k8s.io/apimachinery/pkg/util/wait"
 	"k8s.io/utils/pointer"
-
-	"github.com/openshift/cluster-version-operator/lib/resourceapply"
 )
 
 const (
@@ -62,13 +62,25 @@ func (optr *Operator) syncAll(config *OperatorConfig) error {
 }
 
 func (optr *Operator) syncClusterAPIController(config *OperatorConfig) error {
-	controller := newDeployment(config, nil)
-	_, updated, err := resourceapply.ApplyDeployment(optr.kubeClient.AppsV1(), controller)
+	controllersDeployment := newDeployment(config, nil)
+	d, err := optr.deployLister.Deployments(controllersDeployment.GetNamespace()).Get(controllersDeployment.GetName())
+	generation := int64(-1)
+	if err != nil {
+		if !apierrors.IsNotFound(err) {
+			return err
+		}
+	}
+	if d != nil {
+		generation = d.Status.ObservedGeneration
+	}
+
+	_, updated, err := resourceapply.ApplyDeployment(optr.kubeClient.AppsV1(),
+		events.NewLoggingEventRecorder(optr.name), controllersDeployment, generation)
 	if err != nil {
 		return err
 	}
 	if updated {
-		return optr.waitForDeploymentRollout(controller)
+		return optr.waitForDeploymentRollout(controllersDeployment)
 	}
 	return nil
 }
@@ -87,7 +99,19 @@ func (optr *Operator) syncBaremetalControllers(config *OperatorConfig) error {
 	}
 
 	metal3Deployment := newMetal3Deployment(config, baremetalProvisioningConfig)
-	_, updated, err := resourceapply.ApplyDeployment(optr.kubeClient.AppsV1(), metal3Deployment)
+	generation := int64(-1)
+	d, err := optr.deployLister.Deployments(metal3Deployment.GetNamespace()).Get(metal3Deployment.GetName())
+	if err != nil {
+		if !apierrors.IsNotFound(err) {
+			return err
+		}
+	}
+	if d != nil {
+		generation = d.Status.ObservedGeneration
+	}
+
+	_, updated, err := resourceapply.ApplyDeployment(optr.kubeClient.AppsV1(),
+		events.NewLoggingEventRecorder(optr.name), metal3Deployment, generation)
 	if err != nil {
 		return err
 	}
diff --git a/vendor/github.com/ghodss/yaml/.gitignore b/vendor/github.com/ghodss/yaml/.gitignore
new file mode 100644
index 0000000000000000000000000000000000000000..e256a31e00a52d70866e1f46a686732fa03f0302
--- /dev/null
+++ b/vendor/github.com/ghodss/yaml/.gitignore
@@ -0,0 +1,20 @@
+# OSX leaves these everywhere on SMB shares
+._*
+
+# Eclipse files
+.classpath
+.project
+.settings/**
+
+# Emacs save files
+*~
+
+# Vim-related files
+[._]*.s[a-w][a-z]
+[._]s[a-w][a-z]
+*.un~
+Session.vim
+.netrwhist
+
+# Go test binaries
+*.test
diff --git a/vendor/github.com/ghodss/yaml/.travis.yml b/vendor/github.com/ghodss/yaml/.travis.yml
new file mode 100644
index 0000000000000000000000000000000000000000..0e9d6edc010a643ee82e52fdb95d157ad3fbfee1
--- /dev/null
+++ b/vendor/github.com/ghodss/yaml/.travis.yml
@@ -0,0 +1,7 @@
+language: go
+go:
+  - 1.3
+  - 1.4
+script:
+  - go test
+  - go build
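Reviewer note on the `pkg/operator/sync.go` hunks above: with the cluster-version-operator dependency dropped from `go.mod` in favor of `github.com/openshift/library-go`, library-go's `resourceapply.ApplyDeployment` now takes an `events.Recorder` plus the expected generation of the existing Deployment, so both sync paths first look up the current `Status.ObservedGeneration` through the deployment lister (defaulting to -1 when the Deployment does not exist yet). The lookup is duplicated in `syncClusterAPIController` and `syncBaremetalControllers`; below is a minimal sketch of how it could be factored out (`expectedGeneration` and `applyDeployment` are hypothetical helper names, not part of this patch):

```go
package operator

import (
	appsv1 "k8s.io/api/apps/v1"
	apierrors "k8s.io/apimachinery/pkg/api/errors"
	appsclientv1 "k8s.io/client-go/kubernetes/typed/apps/v1"
	appslisterv1 "k8s.io/client-go/listers/apps/v1"

	"github.com/openshift/library-go/pkg/operator/events"
	"github.com/openshift/library-go/pkg/operator/resource/resourceapply"
)

// expectedGeneration returns the ObservedGeneration of the existing
// Deployment, or -1 when it does not exist yet; -1 tells ApplyDeployment
// there is no prior generation to compare against.
func expectedGeneration(lister appslisterv1.DeploymentLister, required *appsv1.Deployment) (int64, error) {
	d, err := lister.Deployments(required.GetNamespace()).Get(required.GetName())
	if apierrors.IsNotFound(err) {
		return -1, nil
	}
	if err != nil {
		return -1, err
	}
	return d.Status.ObservedGeneration, nil
}

// applyDeployment mirrors what the two sync functions in this patch do
// inline: resolve the expected generation, then apply the Deployment and
// report whether it changed.
func applyDeployment(client appsclientv1.AppsV1Interface, recorder events.Recorder,
	lister appslisterv1.DeploymentLister, required *appsv1.Deployment) (bool, error) {
	generation, err := expectedGeneration(lister, required)
	if err != nil {
		return false, err
	}
	_, updated, err := resourceapply.ApplyDeployment(client, recorder, required, generation)
	return updated, err
}
```

The recorder used in the patch, `events.NewLoggingEventRecorder(optr.name)`, simply logs the apply events rather than emitting Kubernetes Event objects.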
diff --git a/vendor/github.com/ghodss/yaml/LICENSE b/vendor/github.com/ghodss/yaml/LICENSE
new file mode 100644
index 0000000000000000000000000000000000000000..7805d36de7305dddde10c5524242784bdb0cd114
--- /dev/null
+++ b/vendor/github.com/ghodss/yaml/LICENSE
@@ -0,0 +1,50 @@
+The MIT License (MIT)
+
+Copyright (c) 2014 Sam Ghods
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in all
+copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+SOFTWARE.
+
+
+Copyright (c) 2012 The Go Authors. All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are
+met:
+
+   * Redistributions of source code must retain the above copyright
+notice, this list of conditions and the following disclaimer.
+   * Redistributions in binary form must reproduce the above
+copyright notice, this list of conditions and the following disclaimer
+in the documentation and/or other materials provided with the
+distribution.
+   * Neither the name of Google Inc. nor the names of its
+contributors may be used to endorse or promote products derived from
+this software without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
diff --git a/vendor/github.com/ghodss/yaml/README.md b/vendor/github.com/ghodss/yaml/README.md
new file mode 100644
index 0000000000000000000000000000000000000000..0200f75b4d126199dbb86791b78ff6ae6851c0b1
--- /dev/null
+++ b/vendor/github.com/ghodss/yaml/README.md
@@ -0,0 +1,121 @@
+# YAML marshaling and unmarshaling support for Go
+
+[![Build Status](https://travis-ci.org/ghodss/yaml.svg)](https://travis-ci.org/ghodss/yaml)
+
+## Introduction
+
+A wrapper around [go-yaml](https://github.com/go-yaml/yaml) designed to enable a better way of handling YAML when marshaling to and from structs.
+
+In short, this library first converts YAML to JSON using go-yaml and then uses `json.Marshal` and `json.Unmarshal` to convert to or from the struct. This means that it effectively reuses the JSON struct tags as well as the custom JSON methods `MarshalJSON` and `UnmarshalJSON` unlike go-yaml. For a detailed overview of the rationale behind this method, [see this blog post](http://ghodss.com/2014/the-right-way-to-handle-yaml-in-golang/).
+
+## Compatibility
+
+This package uses [go-yaml](https://github.com/go-yaml/yaml) and therefore supports [everything go-yaml supports](https://github.com/go-yaml/yaml#compatibility).
+
+## Caveats
+
+**Caveat #1:** When using `yaml.Marshal` and `yaml.Unmarshal`, binary data should NOT be preceded with the `!!binary` YAML tag. If you do, go-yaml will convert the binary data from base64 to native binary data, which is not compatible with JSON. You can still use binary in your YAML files though - just store them without the `!!binary` tag and decode the base64 in your code (e.g. in the custom JSON methods `MarshalJSON` and `UnmarshalJSON`). This also has the benefit that your YAML and your JSON binary data will be decoded exactly the same way. As an example:
+
+```
+BAD:
+	exampleKey: !!binary gIGC
+
+GOOD:
+	exampleKey: gIGC
+... and decode the base64 data in your code.
+```
+
+**Caveat #2:** When using `YAMLToJSON` directly, maps with keys that are maps will result in an error since this is not supported by JSON. This error will occur in `Unmarshal` as well since you can't unmarshal map keys anyways since struct fields can't be keys.
+
+## Installation and usage
+
+To install, run:
+
+```
+$ go get github.com/ghodss/yaml
+```
+
+And import using:
+
+```
+import "github.com/ghodss/yaml"
+```
+
+Usage is very similar to the JSON library:
+
+```go
+package main
+
+import (
+	"fmt"
+
+	"github.com/ghodss/yaml"
+)
+
+type Person struct {
+	Name string `json:"name"` // Affects YAML field names too.
+	Age  int    `json:"age"`
+}
+
+func main() {
+	// Marshal a Person struct to YAML.
+	p := Person{"John", 30}
+	y, err := yaml.Marshal(p)
+	if err != nil {
+		fmt.Printf("err: %v\n", err)
+		return
+	}
+	fmt.Println(string(y))
+	/* Output:
+	age: 30
+	name: John
+	*/
+
+	// Unmarshal the YAML back into a Person struct.
+	var p2 Person
+	err = yaml.Unmarshal(y, &p2)
+	if err != nil {
+		fmt.Printf("err: %v\n", err)
+		return
+	}
+	fmt.Println(p2)
+	/* Output:
+	{John 30}
+	*/
+}
+```
+
+`yaml.YAMLToJSON` and `yaml.JSONToYAML` methods are also available:
+
+```go
+package main
+
+import (
+	"fmt"
+
+	"github.com/ghodss/yaml"
+)
+
+func main() {
+	j := []byte(`{"name": "John", "age": 30}`)
+	y, err := yaml.JSONToYAML(j)
+	if err != nil {
+		fmt.Printf("err: %v\n", err)
+		return
+	}
+	fmt.Println(string(y))
+	/* Output:
+	name: John
+	age: 30
+	*/
+	j2, err := yaml.YAMLToJSON(y)
+	if err != nil {
+		fmt.Printf("err: %v\n", err)
+		return
+	}
+	fmt.Println(string(j2))
+	/* Output:
+	{"age":30,"name":"John"}
+	*/
+}
+```
diff --git a/vendor/github.com/ghodss/yaml/fields.go b/vendor/github.com/ghodss/yaml/fields.go
new file mode 100644
index 0000000000000000000000000000000000000000..58600740266c675a76ec05954ff6ec1ae33228c6
--- /dev/null
+++ b/vendor/github.com/ghodss/yaml/fields.go
@@ -0,0 +1,501 @@
+// Copyright 2013 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+package yaml
+
+import (
+	"bytes"
+	"encoding"
+	"encoding/json"
+	"reflect"
+	"sort"
+	"strings"
+	"sync"
+	"unicode"
+	"unicode/utf8"
+)
+
+// indirect walks down v allocating pointers as needed,
+// until it gets to a non-pointer.
+// if it encounters an Unmarshaler, indirect stops and returns that.
+// if decodingNull is true, indirect stops at the last pointer so it can be set to nil.
+func indirect(v reflect.Value, decodingNull bool) (json.Unmarshaler, encoding.TextUnmarshaler, reflect.Value) {
+	// If v is a named type and is addressable,
+	// start with its address, so that if the type has pointer methods,
+	// we find them.
+	if v.Kind() != reflect.Ptr && v.Type().Name() != "" && v.CanAddr() {
+		v = v.Addr()
+	}
+	for {
+		// Load value from interface, but only if the result will be
+		// usefully addressable.
+		if v.Kind() == reflect.Interface && !v.IsNil() {
+			e := v.Elem()
+			if e.Kind() == reflect.Ptr && !e.IsNil() && (!decodingNull || e.Elem().Kind() == reflect.Ptr) {
+				v = e
+				continue
+			}
+		}
+
+		if v.Kind() != reflect.Ptr {
+			break
+		}
+
+		if v.Elem().Kind() != reflect.Ptr && decodingNull && v.CanSet() {
+			break
+		}
+		if v.IsNil() {
+			if v.CanSet() {
+				v.Set(reflect.New(v.Type().Elem()))
+			} else {
+				v = reflect.New(v.Type().Elem())
+			}
+		}
+		if v.Type().NumMethod() > 0 {
+			if u, ok := v.Interface().(json.Unmarshaler); ok {
+				return u, nil, reflect.Value{}
+			}
+			if u, ok := v.Interface().(encoding.TextUnmarshaler); ok {
+				return nil, u, reflect.Value{}
+			}
+		}
+		v = v.Elem()
+	}
+	return nil, nil, v
+}
+
+// A field represents a single field found in a struct.
+type field struct {
+	name      string
+	nameBytes []byte                 // []byte(name)
+	equalFold func(s, t []byte) bool // bytes.EqualFold or equivalent
+
+	tag       bool
+	index     []int
+	typ       reflect.Type
+	omitEmpty bool
+	quoted    bool
+}
+
+func fillField(f field) field {
+	f.nameBytes = []byte(f.name)
+	f.equalFold = foldFunc(f.nameBytes)
+	return f
+}
+
+// byName sorts field by name, breaking ties with depth,
+// then breaking ties with "name came from json tag", then
+// breaking ties with index sequence.
+type byName []field
+
+func (x byName) Len() int { return len(x) }
+
+func (x byName) Swap(i, j int) { x[i], x[j] = x[j], x[i] }
+
+func (x byName) Less(i, j int) bool {
+	if x[i].name != x[j].name {
+		return x[i].name < x[j].name
+	}
+	if len(x[i].index) != len(x[j].index) {
+		return len(x[i].index) < len(x[j].index)
+	}
+	if x[i].tag != x[j].tag {
+		return x[i].tag
+	}
+	return byIndex(x).Less(i, j)
+}
+
+// byIndex sorts field by index sequence.
+type byIndex []field
+
+func (x byIndex) Len() int { return len(x) }
+
+func (x byIndex) Swap(i, j int) { x[i], x[j] = x[j], x[i] }
+
+func (x byIndex) Less(i, j int) bool {
+	for k, xik := range x[i].index {
+		if k >= len(x[j].index) {
+			return false
+		}
+		if xik != x[j].index[k] {
+			return xik < x[j].index[k]
+		}
+	}
+	return len(x[i].index) < len(x[j].index)
+}
+
+// typeFields returns a list of fields that JSON should recognize for the given type.
+// The algorithm is breadth-first search over the set of structs to include - the top struct
+// and then any reachable anonymous structs.
+func typeFields(t reflect.Type) []field {
+	// Anonymous fields to explore at the current level and the next.
+	current := []field{}
+	next := []field{{typ: t}}
+
+	// Count of queued names for current level and the next.
+	count := map[reflect.Type]int{}
+	nextCount := map[reflect.Type]int{}
+
+	// Types already visited at an earlier level.
+	visited := map[reflect.Type]bool{}
+
+	// Fields found.
+	var fields []field
+
+	for len(next) > 0 {
+		current, next = next, current[:0]
+		count, nextCount = nextCount, map[reflect.Type]int{}
+
+		for _, f := range current {
+			if visited[f.typ] {
+				continue
+			}
+			visited[f.typ] = true
+
+			// Scan f.typ for fields to include.
+			for i := 0; i < f.typ.NumField(); i++ {
+				sf := f.typ.Field(i)
+				if sf.PkgPath != "" { // unexported
+					continue
+				}
+				tag := sf.Tag.Get("json")
+				if tag == "-" {
+					continue
+				}
+				name, opts := parseTag(tag)
+				if !isValidTag(name) {
+					name = ""
+				}
+				index := make([]int, len(f.index)+1)
+				copy(index, f.index)
+				index[len(f.index)] = i
+
+				ft := sf.Type
+				if ft.Name() == "" && ft.Kind() == reflect.Ptr {
+					// Follow pointer.
+					ft = ft.Elem()
+				}
+
+				// Record found field and index sequence.
+				if name != "" || !sf.Anonymous || ft.Kind() != reflect.Struct {
+					tagged := name != ""
+					if name == "" {
+						name = sf.Name
+					}
+					fields = append(fields, fillField(field{
+						name:      name,
+						tag:       tagged,
+						index:     index,
+						typ:       ft,
+						omitEmpty: opts.Contains("omitempty"),
+						quoted:    opts.Contains("string"),
+					}))
+					if count[f.typ] > 1 {
+						// If there were multiple instances, add a second,
+						// so that the annihilation code will see a duplicate.
+						// It only cares about the distinction between 1 or 2,
+						// so don't bother generating any more copies.
+						fields = append(fields, fields[len(fields)-1])
+					}
+					continue
+				}
+
+				// Record new anonymous struct to explore in next round.
+				nextCount[ft]++
+				if nextCount[ft] == 1 {
+					next = append(next, fillField(field{name: ft.Name(), index: index, typ: ft}))
+				}
+			}
+		}
+	}
+
+	sort.Sort(byName(fields))
+
+	// Delete all fields that are hidden by the Go rules for embedded fields,
+	// except that fields with JSON tags are promoted.
+
+	// The fields are sorted in primary order of name, secondary order
+	// of field index length. Loop over names; for each name, delete
+	// hidden fields by choosing the one dominant field that survives.
+	out := fields[:0]
+	for advance, i := 0, 0; i < len(fields); i += advance {
+		// One iteration per name.
+		// Find the sequence of fields with the name of this first field.
+		fi := fields[i]
+		name := fi.name
+		for advance = 1; i+advance < len(fields); advance++ {
+			fj := fields[i+advance]
+			if fj.name != name {
+				break
+			}
+		}
+		if advance == 1 { // Only one field with this name
+			out = append(out, fi)
+			continue
+		}
+		dominant, ok := dominantField(fields[i : i+advance])
+		if ok {
+			out = append(out, dominant)
+		}
+	}
+
+	fields = out
+	sort.Sort(byIndex(fields))
+
+	return fields
+}
+
+// dominantField looks through the fields, all of which are known to
+// have the same name, to find the single field that dominates the
+// others using Go's embedding rules, modified by the presence of
+// JSON tags. If there are multiple top-level fields, the boolean
+// will be false: This condition is an error in Go and we skip all
+// the fields.
+func dominantField(fields []field) (field, bool) {
+	// The fields are sorted in increasing index-length order. The winner
+	// must therefore be one with the shortest index length. Drop all
+	// longer entries, which is easy: just truncate the slice.
+	length := len(fields[0].index)
+	tagged := -1 // Index of first tagged field.
+	for i, f := range fields {
+		if len(f.index) > length {
+			fields = fields[:i]
+			break
+		}
+		if f.tag {
+			if tagged >= 0 {
+				// Multiple tagged fields at the same level: conflict.
+				// Return no field.
+				return field{}, false
+			}
+			tagged = i
+		}
+	}
+	if tagged >= 0 {
+		return fields[tagged], true
+	}
+	// All remaining fields have the same length. If there's more than one,
+	// we have a conflict (two fields named "X" at the same level) and we
+	// return no field.
+	if len(fields) > 1 {
+		return field{}, false
+	}
+	return fields[0], true
+}
+
+var fieldCache struct {
+	sync.RWMutex
+	m map[reflect.Type][]field
+}
+
+// cachedTypeFields is like typeFields but uses a cache to avoid repeated work.
+func cachedTypeFields(t reflect.Type) []field {
+	fieldCache.RLock()
+	f := fieldCache.m[t]
+	fieldCache.RUnlock()
+	if f != nil {
+		return f
+	}
+
+	// Compute fields without lock.
+	// Might duplicate effort but won't hold other computations back.
+	f = typeFields(t)
+	if f == nil {
+		f = []field{}
+	}
+
+	fieldCache.Lock()
+	if fieldCache.m == nil {
+		fieldCache.m = map[reflect.Type][]field{}
+	}
+	fieldCache.m[t] = f
+	fieldCache.Unlock()
+	return f
+}
+
+func isValidTag(s string) bool {
+	if s == "" {
+		return false
+	}
+	for _, c := range s {
+		switch {
+		case strings.ContainsRune("!#$%&()*+-./:<=>?@[]^_{|}~ ", c):
+			// Backslash and quote chars are reserved, but
+			// otherwise any punctuation chars are allowed
+			// in a tag name.
+		default:
+			if !unicode.IsLetter(c) && !unicode.IsDigit(c) {
+				return false
+			}
+		}
+	}
+	return true
+}
+
+const (
+	caseMask     = ^byte(0x20) // Mask to ignore case in ASCII.
+	kelvin       = '\u212a'
+	smallLongEss = '\u017f'
+)
+
+// foldFunc returns one of four different case folding equivalence
+// functions, from most general (and slow) to fastest:
+//
+// 1) bytes.EqualFold, if the key s contains any non-ASCII UTF-8
+// 2) equalFoldRight, if s contains special folding ASCII ('k', 'K', 's', 'S')
+// 3) asciiEqualFold, no special, but includes non-letters (including _)
+// 4) simpleLetterEqualFold, no specials, no non-letters.
+//
+// The letters S and K are special because they map to 3 runes, not just 2:
+//  * S maps to s and to U+017F 'ſ' Latin small letter long s
+//  * k maps to K and to U+212A 'K' Kelvin sign
+// See http://play.golang.org/p/tTxjOc0OGo
+//
+// The returned function is specialized for matching against s and
+// should only be given s. It's not curried for performance reasons.
+func foldFunc(s []byte) func(s, t []byte) bool {
+	nonLetter := false
+	special := false // special letter
+	for _, b := range s {
+		if b >= utf8.RuneSelf {
+			return bytes.EqualFold
+		}
+		upper := b & caseMask
+		if upper < 'A' || upper > 'Z' {
+			nonLetter = true
+		} else if upper == 'K' || upper == 'S' {
+			// See above for why these letters are special.
+			special = true
+		}
+	}
+	if special {
+		return equalFoldRight
+	}
+	if nonLetter {
+		return asciiEqualFold
+	}
+	return simpleLetterEqualFold
+}
+
+// equalFoldRight is a specialization of bytes.EqualFold when s is
+// known to be all ASCII (including punctuation), but contains an 's',
+// 'S', 'k', or 'K', requiring a Unicode fold on the bytes in t.
+// See comments on foldFunc.
+func equalFoldRight(s, t []byte) bool {
+	for _, sb := range s {
+		if len(t) == 0 {
+			return false
+		}
+		tb := t[0]
+		if tb < utf8.RuneSelf {
+			if sb != tb {
+				sbUpper := sb & caseMask
+				if 'A' <= sbUpper && sbUpper <= 'Z' {
+					if sbUpper != tb&caseMask {
+						return false
+					}
+				} else {
+					return false
+				}
+			}
+			t = t[1:]
+			continue
+		}
+		// sb is ASCII and t is not. t must be either kelvin
+		// sign or long s; sb must be s, S, k, or K.
+ tr, size := utf8.DecodeRune(t) + switch sb { + case 's', 'S': + if tr != smallLongEss { + return false + } + case 'k', 'K': + if tr != kelvin { + return false + } + default: + return false + } + t = t[size:] + + } + if len(t) > 0 { + return false + } + return true +} + +// asciiEqualFold is a specialization of bytes.EqualFold for use when +// s is all ASCII (but may contain non-letters) and contains no +// special-folding letters. +// See comments on foldFunc. +func asciiEqualFold(s, t []byte) bool { + if len(s) != len(t) { + return false + } + for i, sb := range s { + tb := t[i] + if sb == tb { + continue + } + if ('a' <= sb && sb <= 'z') || ('A' <= sb && sb <= 'Z') { + if sb&caseMask != tb&caseMask { + return false + } + } else { + return false + } + } + return true +} + +// simpleLetterEqualFold is a specialization of bytes.EqualFold for +// use when s is all ASCII letters (no underscores, etc) and also +// doesn't contain 'k', 'K', 's', or 'S'. +// See comments on foldFunc. +func simpleLetterEqualFold(s, t []byte) bool { + if len(s) != len(t) { + return false + } + for i, b := range s { + if b&caseMask != t[i]&caseMask { + return false + } + } + return true +} + +// tagOptions is the string following a comma in a struct field's "json" +// tag, or the empty string. It does not include the leading comma. +type tagOptions string + +// parseTag splits a struct field's json tag into its name and +// comma-separated options. +func parseTag(tag string) (string, tagOptions) { + if idx := strings.Index(tag, ","); idx != -1 { + return tag[:idx], tagOptions(tag[idx+1:]) + } + return tag, tagOptions("") +} + +// Contains reports whether a comma-separated list of options +// contains a particular substr flag. substr must be surrounded by a +// string boundary or commas. +func (o tagOptions) Contains(optionName string) bool { + if len(o) == 0 { + return false + } + s := string(o) + for s != "" { + var next string + i := strings.Index(s, ",") + if i >= 0 { + s, next = s[:i], s[i+1:] + } + if s == optionName { + return true + } + s = next + } + return false +} diff --git a/vendor/github.com/ghodss/yaml/yaml.go b/vendor/github.com/ghodss/yaml/yaml.go new file mode 100644 index 0000000000000000000000000000000000000000..4fb4054a8b74711fcd7142b05f95923c9f715449 --- /dev/null +++ b/vendor/github.com/ghodss/yaml/yaml.go @@ -0,0 +1,277 @@ +package yaml + +import ( + "bytes" + "encoding/json" + "fmt" + "reflect" + "strconv" + + "gopkg.in/yaml.v2" +) + +// Marshals the object into JSON then converts JSON to YAML and returns the +// YAML. +func Marshal(o interface{}) ([]byte, error) { + j, err := json.Marshal(o) + if err != nil { + return nil, fmt.Errorf("error marshaling into JSON: %v", err) + } + + y, err := JSONToYAML(j) + if err != nil { + return nil, fmt.Errorf("error converting JSON to YAML: %v", err) + } + + return y, nil +} + +// Converts YAML to JSON then uses JSON to unmarshal into an object. +func Unmarshal(y []byte, o interface{}) error { + vo := reflect.ValueOf(o) + j, err := yamlToJSON(y, &vo) + if err != nil { + return fmt.Errorf("error converting YAML to JSON: %v", err) + } + + err = json.Unmarshal(j, o) + if err != nil { + return fmt.Errorf("error unmarshaling JSON: %v", err) + } + + return nil +} + +// Convert JSON to YAML. +func JSONToYAML(j []byte) ([]byte, error) { + // Convert the JSON to an object. + var jsonObj interface{} + // We are using yaml.Unmarshal here (instead of json.Unmarshal) because the + // Go JSON library doesn't try to pick the right number type (int, float, + // etc.) 
when unmarshaling to interface{}, it just picks float64
+	// universally. go-yaml does go through the effort of picking the right
+	// number type, so we can preserve number type throughout this process.
+	err := yaml.Unmarshal(j, &jsonObj)
+	if err != nil {
+		return nil, err
+	}
+
+	// Marshal this object into YAML.
+	return yaml.Marshal(jsonObj)
+}
+
+// Convert YAML to JSON. Since JSON is a subset of YAML, passing JSON through
+// this method should be a no-op.
+//
+// Things YAML can do that are not supported by JSON:
+// * In YAML you can have binary and null keys in your maps. These are invalid
+// in JSON. (int and float keys are converted to strings.)
+// * Binary data in YAML with the !!binary tag is not supported. If you want to
+// use binary data with this library, encode the data as base64 as usual but do
+// not use the !!binary tag in your YAML. This will ensure the original base64
+// encoded data makes it all the way through to the JSON.
+func YAMLToJSON(y []byte) ([]byte, error) {
+	return yamlToJSON(y, nil)
+}
+
+func yamlToJSON(y []byte, jsonTarget *reflect.Value) ([]byte, error) {
+	// Convert the YAML to an object.
+	var yamlObj interface{}
+	err := yaml.Unmarshal(y, &yamlObj)
+	if err != nil {
+		return nil, err
+	}
+
+	// YAML objects are not completely compatible with JSON objects (e.g. you
+	// can have non-string keys in YAML). So, convert the YAML-compatible object
+	// to a JSON-compatible object, failing with an error if irrecoverable
+	// incompatibilities happen along the way.
+	jsonObj, err := convertToJSONableObject(yamlObj, jsonTarget)
+	if err != nil {
+		return nil, err
+	}
+
+	// Convert this object to JSON and return the data.
+	return json.Marshal(jsonObj)
+}
+
+func convertToJSONableObject(yamlObj interface{}, jsonTarget *reflect.Value) (interface{}, error) {
+	var err error
+
+	// Resolve jsonTarget to a concrete value (i.e. not a pointer or an
+	// interface). We pass decodingNull as false because we're not actually
+	// decoding into the value, we're just checking if the ultimate target is a
+	// string.
+	if jsonTarget != nil {
+		ju, tu, pv := indirect(*jsonTarget, false)
+		// We have a JSON or Text Unmarshaler at this level, so we can't be trying
+		// to decode into a string.
+		if ju != nil || tu != nil {
+			jsonTarget = nil
+		} else {
+			jsonTarget = &pv
+		}
+	}
+
+	// If yamlObj is a number or a boolean, check if jsonTarget is a string -
+	// if so, coerce. Else return normal.
+	// If yamlObj is a map or array, find the field that each key is
+	// unmarshaling to, and when you recurse pass the reflect.Value for that
+	// field back into this function.
+	switch typedYAMLObj := yamlObj.(type) {
+	case map[interface{}]interface{}:
+		// JSON does not support arbitrary keys in a map, so we must convert
+		// these keys to strings.
+		//
+		// From my reading of go-yaml v2 (specifically the resolve function),
+		// keys can only have the types string, int, int64, float64, binary
+		// (unsupported), or null (unsupported).
+		strMap := make(map[string]interface{})
+		for k, v := range typedYAMLObj {
+			// Resolve the key to a string first.
+			var keyString string
+			switch typedKey := k.(type) {
+			case string:
+				keyString = typedKey
+			case int:
+				keyString = strconv.Itoa(typedKey)
+			case int64:
+				// go-yaml will only return an int64 as a key if the system
+				// architecture is 32-bit and the key's value is between 32-bit
+				// and 64-bit. Otherwise the key type will simply be int.
+ keyString = strconv.FormatInt(typedKey, 10) + case float64: + // Stolen from go-yaml to use the same conversion to string as + // the go-yaml library uses to convert float to string when + // Marshaling. + s := strconv.FormatFloat(typedKey, 'g', -1, 32) + switch s { + case "+Inf": + s = ".inf" + case "-Inf": + s = "-.inf" + case "NaN": + s = ".nan" + } + keyString = s + case bool: + if typedKey { + keyString = "true" + } else { + keyString = "false" + } + default: + return nil, fmt.Errorf("Unsupported map key of type: %s, key: %+#v, value: %+#v", + reflect.TypeOf(k), k, v) + } + + // jsonTarget should be a struct or a map. If it's a struct, find + // the field it's going to map to and pass its reflect.Value. If + // it's a map, find the element type of the map and pass the + // reflect.Value created from that type. If it's neither, just pass + // nil - JSON conversion will error for us if it's a real issue. + if jsonTarget != nil { + t := *jsonTarget + if t.Kind() == reflect.Struct { + keyBytes := []byte(keyString) + // Find the field that the JSON library would use. + var f *field + fields := cachedTypeFields(t.Type()) + for i := range fields { + ff := &fields[i] + if bytes.Equal(ff.nameBytes, keyBytes) { + f = ff + break + } + // Do case-insensitive comparison. + if f == nil && ff.equalFold(ff.nameBytes, keyBytes) { + f = ff + } + } + if f != nil { + // Find the reflect.Value of the most preferential + // struct field. + jtf := t.Field(f.index[0]) + strMap[keyString], err = convertToJSONableObject(v, &jtf) + if err != nil { + return nil, err + } + continue + } + } else if t.Kind() == reflect.Map { + // Create a zero value of the map's element type to use as + // the JSON target. + jtv := reflect.Zero(t.Type().Elem()) + strMap[keyString], err = convertToJSONableObject(v, &jtv) + if err != nil { + return nil, err + } + continue + } + } + strMap[keyString], err = convertToJSONableObject(v, nil) + if err != nil { + return nil, err + } + } + return strMap, nil + case []interface{}: + // We need to recurse into arrays in case there are any + // map[interface{}]interface{}'s inside and to convert any + // numbers to strings. + + // If jsonTarget is a slice (which it really should be), find the + // thing it's going to map to. If it's not a slice, just pass nil + // - JSON conversion will error for us if it's a real issue. + var jsonSliceElemValue *reflect.Value + if jsonTarget != nil { + t := *jsonTarget + if t.Kind() == reflect.Slice { + // By default slices point to nil, but we need a reflect.Value + // pointing to a value of the slice type, so we create one here. + ev := reflect.Indirect(reflect.New(t.Type().Elem())) + jsonSliceElemValue = &ev + } + } + + // Make and use a new array. + arr := make([]interface{}, len(typedYAMLObj)) + for i, v := range typedYAMLObj { + arr[i], err = convertToJSONableObject(v, jsonSliceElemValue) + if err != nil { + return nil, err + } + } + return arr, nil + default: + // If the target type is a string and the YAML type is a number, + // convert the YAML type to a string. + if jsonTarget != nil && (*jsonTarget).Kind() == reflect.String { + // Based on my reading of go-yaml, it may return int, int64, + // float64, or uint64. 
+ var s string + switch typedVal := typedYAMLObj.(type) { + case int: + s = strconv.FormatInt(int64(typedVal), 10) + case int64: + s = strconv.FormatInt(typedVal, 10) + case float64: + s = strconv.FormatFloat(typedVal, 'g', -1, 32) + case uint64: + s = strconv.FormatUint(typedVal, 10) + case bool: + if typedVal { + s = "true" + } else { + s = "false" + } + } + if len(s) > 0 { + yamlObj = interface{}(s) + } + } + return yamlObj, nil + } + + return nil, nil +} diff --git a/vendor/github.com/google/gofuzz/README.md b/vendor/github.com/google/gofuzz/README.md index 64869af347a56b736e9d230bff4231aeb62d7f88..386c2a457a8b6e9413678d386f75aef4abfa3612 100644 --- a/vendor/github.com/google/gofuzz/README.md +++ b/vendor/github.com/google/gofuzz/README.md @@ -3,7 +3,7 @@ gofuzz gofuzz is a library for populating go objects with random values. -[](https://godoc.org/github.com/google/gofuzz) +[](https://godoc.org/github.com/google/gofuzz) [](https://travis-ci.org/google/gofuzz) This is useful for testing: diff --git a/vendor/github.com/google/gofuzz/fuzz.go b/vendor/github.com/google/gofuzz/fuzz.go index 1dfa80a6fca972c89eac59415225ebff6aa551c7..da0a5f93800d85215614c1f34b3d344df2e42cde 100644 --- a/vendor/github.com/google/gofuzz/fuzz.go +++ b/vendor/github.com/google/gofuzz/fuzz.go @@ -20,6 +20,7 @@ import ( "fmt" "math/rand" "reflect" + "regexp" "time" ) @@ -28,13 +29,14 @@ type fuzzFuncMap map[reflect.Type]reflect.Value // Fuzzer knows how to fill any object with random fields. type Fuzzer struct { - fuzzFuncs fuzzFuncMap - defaultFuzzFuncs fuzzFuncMap - r *rand.Rand - nilChance float64 - minElements int - maxElements int - maxDepth int + fuzzFuncs fuzzFuncMap + defaultFuzzFuncs fuzzFuncMap + r *rand.Rand + nilChance float64 + minElements int + maxElements int + maxDepth int + skipFieldPatterns []*regexp.Regexp } // New returns a new Fuzzer. Customize your Fuzzer further by calling Funcs, @@ -150,6 +152,13 @@ func (f *Fuzzer) MaxDepth(d int) *Fuzzer { return f } +// Skip fields which match the supplied pattern. Call this multiple times if needed +// This is useful to skip XXX_ fields generated by protobuf +func (f *Fuzzer) SkipFieldsWithPattern(pattern *regexp.Regexp) *Fuzzer { + f.skipFieldPatterns = append(f.skipFieldPatterns, pattern) + return f +} + // Fuzz recursively fills all of obj's fields with something random. First // this tries to find a custom fuzz function (see Funcs). 
If there is no
 // custom function this tests whether the object implements fuzz.Interface and,
@@ -274,7 +283,17 @@ func (fc *fuzzerContext) doFuzz(v reflect.Value, flags uint64) {
 		v.Set(reflect.Zero(v.Type()))
 	case reflect.Struct:
 		for i := 0; i < v.NumField(); i++ {
-			fc.doFuzz(v.Field(i), 0)
+			skipField := false
+			fieldName := v.Type().Field(i).Name
+			for _, pattern := range fc.fuzzer.skipFieldPatterns {
+				if pattern.MatchString(fieldName) {
+					skipField = true
+					break
+				}
+			}
+			if !skipField {
+				fc.doFuzz(v.Field(i), 0)
+			}
 		}
 	case reflect.Chan:
 		fallthrough
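The hunk above wires the new `skipFieldPatterns` list into `doFuzz`, so the `SkipFieldsWithPattern` option added earlier in this diff takes effect during struct fuzzing. A minimal usage sketch (the `Msg` type and its field names are illustrative, not part of this diff):

```go
package main

import (
	"fmt"
	"regexp"

	fuzz "github.com/google/gofuzz"
)

// Msg mimics a protobuf-generated struct: gofuzz should fill Name
// but leave the XXX_ bookkeeping field untouched.
type Msg struct {
	Name          string
	XXX_sizecache int32
}

func main() {
	f := fuzz.New().SkipFieldsWithPattern(regexp.MustCompile(`^XXX_`))
	var m Msg
	f.Fuzz(&m)
	fmt.Printf("%+v\n", m) // XXX_sizecache remains 0
}
```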
diff --git a/vendor/github.com/openshift/api/.gitattributes b/vendor/github.com/openshift/api/.gitattributes
new file mode 100644
index 0000000000000000000000000000000000000000..124067fe730029f2c245b7f95d27a2e8cca2f723
--- /dev/null
+++ b/vendor/github.com/openshift/api/.gitattributes
@@ -0,0 +1,7 @@
+# Set unix LF EOL for shell scripts
+*.sh text eol=lf
+
+**/zz_generated.*.go linguist-generated=true
+**/types.generated.go linguist-generated=true
+**/generated.pb.go linguist-generated=true
+**/generated.proto linguist-generated=true
diff --git a/vendor/github.com/openshift/api/.gitignore b/vendor/github.com/openshift/api/.gitignore
new file mode 100644
index 0000000000000000000000000000000000000000..98834d134803a0d8f6ab05e597fdf593a94e9274
--- /dev/null
+++ b/vendor/github.com/openshift/api/.gitignore
@@ -0,0 +1,16 @@
+# Binaries for programs and plugins
+*.exe
+*.dll
+*.so
+*.dylib
+
+# Test binary, build with `go test -c`
+*.test
+
+# Output of the go coverage tool, specifically when used with LiteIDE
+*.out
+
+# Project-local glide cache, RE: https://github.com/Masterminds/glide/issues/736
+.glide/
+.idea/
+_output/
\ No newline at end of file
diff --git a/vendor/github.com/openshift/api/.travis.yml b/vendor/github.com/openshift/api/.travis.yml
new file mode 100644
index 0000000000000000000000000000000000000000..121a378ad2f9610088674bba96e63943aa638f3a
--- /dev/null
+++ b/vendor/github.com/openshift/api/.travis.yml
@@ -0,0 +1,22 @@
+language: go
+
+go:
+  - "1.13"
+
+env:
+  - GOFLAGS=-mod=vendor
+
+install:
+  - wget https://github.com/google/protobuf/releases/download/v3.0.2/protoc-3.0.2-linux-x86_64.zip
+  - mkdir protoc
+  - export PATH=`pwd`/protoc/bin:${PATH}
+  - unzip protoc-3.0.2-linux-x86_64.zip -d protoc/
+  - GOFLAGS="" go get golang.org/x/tools/cmd/goimports
+
+script:
+  - make verify build test
+
+notifications:
+  irc: "chat.freenode.net#openshift-dev"
+
+sudo: false
diff --git a/vendor/github.com/openshift/api/Dockerfile.build b/vendor/github.com/openshift/api/Dockerfile.build
new file mode 100644
index 0000000000000000000000000000000000000000..9f1d7fcfdbcfcae48e1c01164a845cddec4af8c3
--- /dev/null
+++ b/vendor/github.com/openshift/api/Dockerfile.build
@@ -0,0 +1,13 @@
+FROM fedora:30
+
+ENV GOPATH=/go
+ENV PATH=/go/bin:$PATH
+
+RUN dnf -y install make git unzip golang wget
+RUN go get -u -v golang.org/x/tools/cmd/...
+RUN wget https://github.com/google/protobuf/releases/download/v3.0.2/protoc-3.0.2-linux-x86_64.zip && \
+    mkdir protoc && \
+    unzip protoc-3.0.2-linux-x86_64.zip -d protoc/ && \
+    mv protoc/bin/protoc /usr/bin && \
+    rm -rf protoc
+
diff --git a/vendor/github.com/openshift/api/Makefile b/vendor/github.com/openshift/api/Makefile
new file mode 100644
index 0000000000000000000000000000000000000000..b93d98a8b245c740f81178cd60fdb99597dd7a31
--- /dev/null
+++ b/vendor/github.com/openshift/api/Makefile
@@ -0,0 +1,53 @@
+all: build
+.PHONY: all
+
+# Include the library makefile
+include $(addprefix ./hack/alpha-build-machinery/make/, \
+	golang.mk \
+	targets/openshift/deps.mk \
+	targets/openshift/crd-schema-gen.mk \
+)
+
+GO_PACKAGES :=$(addsuffix ...,$(addprefix ./,$(filter-out vendor/,$(filter-out hack/,$(wildcard */)))))
+GO_BUILD_PACKAGES :=$(GO_PACKAGES)
+GO_BUILD_PACKAGES_EXPANDED :=$(GO_BUILD_PACKAGES)
+# LDFLAGS are not needed for dummy builds (saving time on calling git commands)
+GO_LD_FLAGS:=
+
+# $1 - target name
+# $2 - apis
+# $3 - manifests
+# $4 - output
+$(call add-crd-gen,authorization,./authorization/v1,./authorization/v1,./authorization/v1)
+$(call add-crd-gen,config,./config/v1,./config/v1,./config/v1)
+$(call add-crd-gen,console,./console/v1,./console/v1,./console/v1)
+$(call add-crd-gen,imageregistry,./imageregistry/v1,./imageregistry/v1,./imageregistry/v1)
+$(call add-crd-gen,operator,./operator/v1,./operator/v1,./operator/v1)
+$(call add-crd-gen,operator-alpha,./operator/v1alpha1,./operator/v1alpha1,./operator/v1alpha1)
+$(call add-crd-gen,operatoringress,./operatoringress/v1,./operatoringress/v1,./operatoringress/v1)
+$(call add-crd-gen,quota,./quota/v1,./quota/v1,./quota/v1)
+$(call add-crd-gen,samples,./samples/v1,./samples/v1,./samples/v1)
+$(call add-crd-gen,security,./security/v1,./security/v1,./security/v1)
+$(call add-crd-gen,network,./network/v1,./network/v1,./network/v1)
+
+RUNTIME ?= podman
+RUNTIME_IMAGE_NAME ?= openshift-api-generator
+
+verify-scripts:
+	bash -x hack/verify-deepcopy.sh
+	bash -x hack/verify-protobuf.sh
+	bash -x hack/verify-swagger-docs.sh
+	bash -x hack/verify-crds.sh
+.PHONY: verify-scripts
+verify: verify-scripts verify-codegen-crds
+
+update-scripts:
+	hack/update-deepcopy.sh
+	hack/update-protobuf.sh
+	hack/update-swagger-docs.sh
+.PHONY: update-scripts
+update: update-scripts update-codegen-crds
+
+generate-with-container: Dockerfile.build
+	$(RUNTIME) build -t $(RUNTIME_IMAGE_NAME) -f Dockerfile.build .
+	$(RUNTIME) run -ti --rm -v $(PWD):/go/src/github.com/openshift/api:z -w /go/src/github.com/openshift/api $(RUNTIME_IMAGE_NAME) make update-scripts
diff --git a/vendor/github.com/openshift/api/OWNERS b/vendor/github.com/openshift/api/OWNERS
new file mode 100644
index 0000000000000000000000000000000000000000..ff955ddc8ebbde7b899ef80c9445bd11e273c38d
--- /dev/null
+++ b/vendor/github.com/openshift/api/OWNERS
@@ -0,0 +1,15 @@
+reviewers:
+  - adambkaplan
+  - abhinavdahiya
+approvers:
+  - smarterclayton
+  - deads2k
+  - derekwaynecarr
+  - eparis
+  - jwforres
+  - knobunc
+  - sjenning
+  - mfojtik
+  - soltysh
+  - sttts
+  - bparees
diff --git a/vendor/github.com/openshift/api/README.md b/vendor/github.com/openshift/api/README.md
new file mode 100644
index 0000000000000000000000000000000000000000..aacb32bc0e7eab374f19ca34384030ca0c888281
--- /dev/null
+++ b/vendor/github.com/openshift/api/README.md
@@ -0,0 +1,69 @@
+# api
+The canonical location of the OpenShift API definition.
This repo holds the API type definitions and serialization code used by [openshift/client-go](https://github.com/openshift/client-go).
+
+## pull request process
+
+Pull requests that change API types in this repo and that have corresponding "internal" API objects in the
+[openshift/origin](https://github.com/openshift/origin) repo must be paired with a pull request to
+[openshift/origin](https://github.com/openshift/origin).
+
+To ensure the corresponding origin pull request is ready to merge as soon as the pull request to this repo is merged:
+1. Base your pull request to this repo on the latest [openshift/api#master](https://github.com/openshift/api/commits/master) and ensure CI is green
+2. Base your pull request to openshift/origin on the latest [openshift/origin#master](https://github.com/openshift/origin/commits/master)
+3. In your openshift/origin pull request:
+   1. Add a TMP commit that points [glide.yaml](https://github.com/openshift/origin/blob/master/glide.yaml#L39-L41) at your fork of openshift/api, and the branch of your pull request:
+
+      ```
+      - package: github.com/openshift/api
+        repo: https://github.com/<your-username>/api.git
+        version: "<your-openshift-api-branch>"
+      ```
+
+   2. Update your `bump(*)` commit to include the result of running `hack/update-deps.sh`, which will pull in the changes from your openshift/api pull request
+   3. Make sure CI is green on your openshift/origin pull request
+   4. Get LGTM on your openshift/api pull request (for API changes) and your openshift/origin pull request (for code changes)
+
+Once both pull requests are ready, the openshift/api pull request can be merged.
+
+Then do the following with your openshift/origin pull request:
+1. Drop the TMP commit (pointing glide back at openshift/api#master)
+2. Rerun `hack/update-deps.sh` and update your `bump(*)` commit
+3. It can then be tagged and merged by CI
+
+## generating CRD schemas
+
+Since Kubernetes 1.16, every CRD created in `apiextensions.k8s.io/v1` is required to have a [structural OpenAPIV3 schema](https://kubernetes.io/blog/2019/06/20/crd-structural-schema/). The schemas provide server-side validation for fields, as well as providing the descriptions for `oc explain`. Moreover, schemas ensure structural consistency of data in etcd. Without one, anything can be stored in a resource, which can have security implications. As we host many of our CRDs in this repo along with their corresponding Go types, we also require them to have schemas. However, the following instructions also apply to CRDs that are not hosted here.
+
+These schemas are often very long and complex, and should not be written by hand. For OpenShift, we provide Makefile targets in [library-go's alpha-build-machinery](https://github.com/openshift/library-go/tree/master/alpha-build-machinery) which generate the schema, built on upstream's [controller-gen](https://github.com/kubernetes-sigs/controller-tools) tool.
+
+If you make a change to a CRD type in this repo, simply calling `make update-codegen-crds` should regenerate all CRDs and update the manifests. If yours is not updated, ensure that the path to its API is included in our [calls to the Makefile targets](https://github.com/openshift/api/blob/release-4.5/Makefile#L17-L29).
+
+To add this generator to another repo:
+1. Vendor `github.com/openshift/library-go` (and ensure that the `alpha-build-machinery` subdirectory is also included in your `vendor`)
+
+2. Update your `Makefile` to include the following:
+```
+include $(addprefix ./vendor/github.com/openshift/library-go/alpha-build-machinery/make/, \
+	targets/openshift/crd-schema-gen.mk \
+)
+
+$(call add-crd-gen,<TARGET_NAME>,<API_DIRECTORY>,<CRD_MANIFESTS>,<MANIFEST_OUTPUT>)
+```
+The parameters for the call are:
+
+1. `TARGET_NAME`: The name of your generated Make target. This can be anything, as long as it does not conflict with another make target. Using your API name is recommended.
+2. `API_DIRECTORY`: The location of your API. For example, if your Go types are located under `pkg/apis/myoperator/v1/types.go`, this should be `./pkg/apis/myoperator/v1`.
+3. `CRD_MANIFESTS`: The directory your CRDs are located in. For example, if that is `manifests/my_operator.crd.yaml`, this should be `./manifests`.
+4. `MANIFEST_OUTPUT`: This should most likely be the same as `CRD_MANIFESTS`, and is only provided for flexibility to output generated code to a different directory.
+
+You can include as many calls to different APIs as necessary, or if you have multiple APIs under the same directory (e.g. `v1` and `v2beta1`) you can use one call to the parent directory pointing to your API, as in the example below.
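+
+For concreteness, a hypothetical repo with Go types under `pkg/apis/myoperator/v1` and CRD manifests under `manifests/` (placeholder paths, matching the examples above) would add:
+
+```
+$(call add-crd-gen,myoperator,./pkg/apis/myoperator/v1,./manifests,./manifests)
+```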
+
+After this, calling `make update-codegen-crds` should generate a new structural OpenAPIV3 schema for your CRDs.
+
+**Notes**
+- This will not generate entire CRDs, only their OpenAPIV3 schemas. If you do not already have a CRD, you will get no output from the generator.
+- Ensure that your API is correctly declared for the generator to pick it up. That means, in your `doc.go`, include the following:
+  1. `// +groupName=<API_GROUP_NAME>`, which should match the `group` in your CRD `spec`
+  2. `// +kubebuilder:validation:Optional`, which tells the generator that fields should be optional unless explicitly marked with `// +kubebuilder:validation:Required`
+
+For more information on the API markers to add to your Go types, see the [Kubebuilder book](https://book.kubebuilder.io/reference/markers.html).
\ No newline at end of file
diff --git a/vendor/github.com/openshift/api/apps/OWNERS b/vendor/github.com/openshift/api/apps/OWNERS
new file mode 100644
index 0000000000000000000000000000000000000000..d8d669b9106ed92eeb210fb36880cbdf42829bf8
--- /dev/null
+++ b/vendor/github.com/openshift/api/apps/OWNERS
@@ -0,0 +1,3 @@
+reviewers:
+  - mfojtik
+  - soltysh
diff --git a/vendor/github.com/openshift/api/apps/install.go b/vendor/github.com/openshift/api/apps/install.go
new file mode 100644
index 0000000000000000000000000000000000000000..80f7ba2b2c9bf59dc528572c750d4c191bcb2658
--- /dev/null
+++ b/vendor/github.com/openshift/api/apps/install.go
@@ -0,0 +1,26 @@
+package apps
+
+import (
+	"k8s.io/apimachinery/pkg/runtime"
+	"k8s.io/apimachinery/pkg/runtime/schema"
+
+	appsv1 "github.com/openshift/api/apps/v1"
+)
+
+const (
+	GroupName = "apps.openshift.io"
+)
+
+var (
+	schemeBuilder = runtime.NewSchemeBuilder(appsv1.Install)
+	// Install is a function which adds every version of this group to a scheme
+	Install = schemeBuilder.AddToScheme
+)
+
+func Resource(resource string) schema.GroupResource {
+	return schema.GroupResource{Group: GroupName, Resource: resource}
+}
+
+func Kind(kind string) schema.GroupKind {
+	return schema.GroupKind{Group: GroupName, Kind: kind}
+}
diff --git a/vendor/github.com/openshift/api/apps/v1/consts.go b/vendor/github.com/openshift/api/apps/v1/consts.go
new file mode 100644
index 0000000000000000000000000000000000000000..212578bccfb1bf67fc2cfde360b255f1626f1fe3
--- /dev/null
+++ 
b/vendor/github.com/openshift/api/apps/v1/consts.go @@ -0,0 +1,108 @@ +package v1 + +const ( + // DeploymentStatusReasonAnnotation represents the reason for deployment being in a given state + // Used for specifying the reason for cancellation or failure of a deployment + // This is on replication controller set by deployer controller. + DeploymentStatusReasonAnnotation = "openshift.io/deployment.status-reason" + + // DeploymentPodAnnotation is an annotation on a deployment (a ReplicationController). The + // annotation value is the name of the deployer Pod which will act upon the ReplicationController + // to implement the deployment behavior. + // This is set on replication controller by deployer controller. + DeploymentPodAnnotation = "openshift.io/deployer-pod.name" + + // DeploymentConfigAnnotation is an annotation name used to correlate a deployment with the + // DeploymentConfig on which the deployment is based. + // This is set on replication controller pod template by deployer controller. + DeploymentConfigAnnotation = "openshift.io/deployment-config.name" + + // DeploymentCancelledAnnotation indicates that the deployment has been cancelled + // The annotation value does not matter and its mere presence indicates cancellation. + // This is set on replication controller by deployment config controller or oc rollout cancel command. + DeploymentCancelledAnnotation = "openshift.io/deployment.cancelled" + + // DeploymentEncodedConfigAnnotation is an annotation name used to retrieve specific encoded + // DeploymentConfig on which a given deployment is based. + // This is set on replication controller by deployer controller. + DeploymentEncodedConfigAnnotation = "openshift.io/encoded-deployment-config" + + // DeploymentVersionAnnotation is an annotation on a deployment (a ReplicationController). The + // annotation value is the LatestVersion value of the DeploymentConfig which was the basis for + // the deployment. + // This is set on replication controller pod template by deployment config controller. + DeploymentVersionAnnotation = "openshift.io/deployment-config.latest-version" + + // DeployerPodForDeploymentLabel is a label which groups pods related to a + // deployment. The value is a deployment name. The deployer pod and hook pods + // created by the internal strategies will have this label. Custom + // strategies can apply this label to any pods they create, enabling + // platform-provided cancellation and garbage collection support. + // This is set on deployer pod by deployer controller. + DeployerPodForDeploymentLabel = "openshift.io/deployer-pod-for.name" + + // DeploymentStatusAnnotation is an annotation name used to retrieve the DeploymentPhase of + // a deployment. + // This is set on replication controller by deployer controller. + DeploymentStatusAnnotation = "openshift.io/deployment.phase" +) + +type DeploymentConditionReason string + +var ( + // ReplicationControllerUpdatedReason is added in a deployment config when one of its replication + // controllers is updated as part of the rollout process. + ReplicationControllerUpdatedReason DeploymentConditionReason = "ReplicationControllerUpdated" + + // ReplicationControllerCreateError is added in a deployment config when it cannot create a new replication + // controller. + ReplicationControllerCreateErrorReason DeploymentConditionReason = "ReplicationControllerCreateError" + + // ReplicationControllerCreatedReason is added in a deployment config when it creates a new replication + // controller. 
+ NewReplicationControllerCreatedReason DeploymentConditionReason = "NewReplicationControllerCreated" + + // NewReplicationControllerAvailableReason is added in a deployment config when its newest replication controller is made + // available ie. the number of new pods that have passed readiness checks and run for at least + // minReadySeconds is at least the minimum available pods that need to run for the deployment config. + NewReplicationControllerAvailableReason DeploymentConditionReason = "NewReplicationControllerAvailable" + + // ProgressDeadlineExceededReason is added in a deployment config when its newest replication controller fails to show + // any progress within the given deadline (progressDeadlineSeconds). + ProgressDeadlineExceededReason DeploymentConditionReason = "ProgressDeadlineExceeded" + + // DeploymentConfigPausedReason is added in a deployment config when it is paused. Lack of progress shouldn't be + // estimated once a deployment config is paused. + DeploymentConfigPausedReason DeploymentConditionReason = "DeploymentConfigPaused" + + // DeploymentConfigResumedReason is added in a deployment config when it is resumed. Useful for not failing accidentally + // deployment configs that paused amidst a rollout. + DeploymentConfigResumedReason DeploymentConditionReason = "DeploymentConfigResumed" + + // RolloutCancelledReason is added in a deployment config when its newest rollout was + // interrupted by cancellation. + RolloutCancelledReason DeploymentConditionReason = "RolloutCancelled" +) + +// DeploymentStatus describes the possible states a deployment can be in. +type DeploymentStatus string + +var ( + + // DeploymentStatusNew means the deployment has been accepted but not yet acted upon. + DeploymentStatusNew DeploymentStatus = "New" + + // DeploymentStatusPending means the deployment been handed over to a deployment strategy, + // but the strategy has not yet declared the deployment to be running. + DeploymentStatusPending DeploymentStatus = "Pending" + + // DeploymentStatusRunning means the deployment strategy has reported the deployment as + // being in-progress. + DeploymentStatusRunning DeploymentStatus = "Running" + + // DeploymentStatusComplete means the deployment finished without an error. + DeploymentStatusComplete DeploymentStatus = "Complete" + + // DeploymentStatusFailed means the deployment finished with an error. + DeploymentStatusFailed DeploymentStatus = "Failed" +) diff --git a/vendor/github.com/openshift/api/apps/v1/deprecated_consts.go b/vendor/github.com/openshift/api/apps/v1/deprecated_consts.go new file mode 100644 index 0000000000000000000000000000000000000000..31969786c4d1b97114439743898e3a2680166ff8 --- /dev/null +++ b/vendor/github.com/openshift/api/apps/v1/deprecated_consts.go @@ -0,0 +1,38 @@ +package v1 + +// This file contains consts that are not shared between components and set just internally. +// They will likely be removed in (near) future. + +const ( + // DeployerPodCreatedAtAnnotation is an annotation on a deployment that + // records the time in RFC3339 format of when the deployer pod for this particular + // deployment was created. + // This is set by deployer controller, but not consumed by any command or internally. + // DEPRECATED: will be removed soon + DeployerPodCreatedAtAnnotation = "openshift.io/deployer-pod.created-at" + + // DeployerPodStartedAtAnnotation is an annotation on a deployment that + // records the time in RFC3339 format of when the deployer pod for this particular + // deployment was started. 
+ // This is set by deployer controller, but not consumed by any command or internally. + // DEPRECATED: will be removed soon + DeployerPodStartedAtAnnotation = "openshift.io/deployer-pod.started-at" + + // DeployerPodCompletedAtAnnotation is an annotation on deployment that records + // the time in RFC3339 format of when the deployer pod finished. + // This is set by deployer controller, but not consumed by any command or internally. + // DEPRECATED: will be removed soon + DeployerPodCompletedAtAnnotation = "openshift.io/deployer-pod.completed-at" + + // DesiredReplicasAnnotation represents the desired number of replicas for a + // new deployment. + // This is set by deployer controller, but not consumed by any command or internally. + // DEPRECATED: will be removed soon + DesiredReplicasAnnotation = "kubectl.kubernetes.io/desired-replicas" + + // DeploymentAnnotation is an annotation on a deployer Pod. The annotation value is the name + // of the deployment (a ReplicationController) on which the deployer Pod acts. + // This is set by deployer controller and consumed internally and in oc adm top command. + // DEPRECATED: will be removed soon + DeploymentAnnotation = "openshift.io/deployment.name" +) diff --git a/vendor/github.com/openshift/api/apps/v1/doc.go b/vendor/github.com/openshift/api/apps/v1/doc.go new file mode 100644 index 0000000000000000000000000000000000000000..edd16297b545942abc4b1ea11d6c166e15f4b921 --- /dev/null +++ b/vendor/github.com/openshift/api/apps/v1/doc.go @@ -0,0 +1,8 @@ +// +k8s:deepcopy-gen=package,register +// +k8s:conversion-gen=github.com/openshift/origin/pkg/apps/apis/apps +// +k8s:defaulter-gen=TypeMeta +// +k8s:openapi-gen=true + +// +groupName=apps.openshift.io +// Package v1 is the v1 version of the API. +package v1 diff --git a/vendor/github.com/openshift/api/apps/v1/generated.pb.go b/vendor/github.com/openshift/api/apps/v1/generated.pb.go new file mode 100644 index 0000000000000000000000000000000000000000..4ade112e2c4febe59601730a1bc8292b5361a13d --- /dev/null +++ b/vendor/github.com/openshift/api/apps/v1/generated.pb.go @@ -0,0 +1,7556 @@ +// Code generated by protoc-gen-gogo. DO NOT EDIT. +// source: github.com/openshift/api/apps/v1/generated.proto + +package v1 + +import ( + fmt "fmt" + + io "io" + + proto "github.com/gogo/protobuf/proto" + github_com_gogo_protobuf_sortkeys "github.com/gogo/protobuf/sortkeys" + k8s_io_api_core_v1 "k8s.io/api/core/v1" + v1 "k8s.io/api/core/v1" + v11 "k8s.io/apimachinery/pkg/apis/meta/v1" + + math "math" + math_bits "math/bits" + reflect "reflect" + strings "strings" + + intstr "k8s.io/apimachinery/pkg/util/intstr" +) + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. 
+const _ = proto.GoGoProtoPackageIsVersion2 // please upgrade the proto package + +func (m *CustomDeploymentStrategyParams) Reset() { *m = CustomDeploymentStrategyParams{} } +func (*CustomDeploymentStrategyParams) ProtoMessage() {} +func (*CustomDeploymentStrategyParams) Descriptor() ([]byte, []int) { + return fileDescriptor_8f1b1bee37da74c1, []int{0} +} +func (m *CustomDeploymentStrategyParams) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *CustomDeploymentStrategyParams) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *CustomDeploymentStrategyParams) XXX_Merge(src proto.Message) { + xxx_messageInfo_CustomDeploymentStrategyParams.Merge(m, src) +} +func (m *CustomDeploymentStrategyParams) XXX_Size() int { + return m.Size() +} +func (m *CustomDeploymentStrategyParams) XXX_DiscardUnknown() { + xxx_messageInfo_CustomDeploymentStrategyParams.DiscardUnknown(m) +} + +var xxx_messageInfo_CustomDeploymentStrategyParams proto.InternalMessageInfo + +func (m *DeploymentCause) Reset() { *m = DeploymentCause{} } +func (*DeploymentCause) ProtoMessage() {} +func (*DeploymentCause) Descriptor() ([]byte, []int) { + return fileDescriptor_8f1b1bee37da74c1, []int{1} +} +func (m *DeploymentCause) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *DeploymentCause) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *DeploymentCause) XXX_Merge(src proto.Message) { + xxx_messageInfo_DeploymentCause.Merge(m, src) +} +func (m *DeploymentCause) XXX_Size() int { + return m.Size() +} +func (m *DeploymentCause) XXX_DiscardUnknown() { + xxx_messageInfo_DeploymentCause.DiscardUnknown(m) +} + +var xxx_messageInfo_DeploymentCause proto.InternalMessageInfo + +func (m *DeploymentCauseImageTrigger) Reset() { *m = DeploymentCauseImageTrigger{} } +func (*DeploymentCauseImageTrigger) ProtoMessage() {} +func (*DeploymentCauseImageTrigger) Descriptor() ([]byte, []int) { + return fileDescriptor_8f1b1bee37da74c1, []int{2} +} +func (m *DeploymentCauseImageTrigger) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *DeploymentCauseImageTrigger) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *DeploymentCauseImageTrigger) XXX_Merge(src proto.Message) { + xxx_messageInfo_DeploymentCauseImageTrigger.Merge(m, src) +} +func (m *DeploymentCauseImageTrigger) XXX_Size() int { + return m.Size() +} +func (m *DeploymentCauseImageTrigger) XXX_DiscardUnknown() { + xxx_messageInfo_DeploymentCauseImageTrigger.DiscardUnknown(m) +} + +var xxx_messageInfo_DeploymentCauseImageTrigger proto.InternalMessageInfo + +func (m *DeploymentCondition) Reset() { *m = DeploymentCondition{} } +func (*DeploymentCondition) ProtoMessage() {} +func (*DeploymentCondition) Descriptor() ([]byte, []int) { + return fileDescriptor_8f1b1bee37da74c1, []int{3} +} +func (m *DeploymentCondition) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *DeploymentCondition) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *DeploymentCondition) XXX_Merge(src proto.Message) { 
+ xxx_messageInfo_DeploymentCondition.Merge(m, src) +} +func (m *DeploymentCondition) XXX_Size() int { + return m.Size() +} +func (m *DeploymentCondition) XXX_DiscardUnknown() { + xxx_messageInfo_DeploymentCondition.DiscardUnknown(m) +} + +var xxx_messageInfo_DeploymentCondition proto.InternalMessageInfo + +func (m *DeploymentConfig) Reset() { *m = DeploymentConfig{} } +func (*DeploymentConfig) ProtoMessage() {} +func (*DeploymentConfig) Descriptor() ([]byte, []int) { + return fileDescriptor_8f1b1bee37da74c1, []int{4} +} +func (m *DeploymentConfig) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *DeploymentConfig) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *DeploymentConfig) XXX_Merge(src proto.Message) { + xxx_messageInfo_DeploymentConfig.Merge(m, src) +} +func (m *DeploymentConfig) XXX_Size() int { + return m.Size() +} +func (m *DeploymentConfig) XXX_DiscardUnknown() { + xxx_messageInfo_DeploymentConfig.DiscardUnknown(m) +} + +var xxx_messageInfo_DeploymentConfig proto.InternalMessageInfo + +func (m *DeploymentConfigList) Reset() { *m = DeploymentConfigList{} } +func (*DeploymentConfigList) ProtoMessage() {} +func (*DeploymentConfigList) Descriptor() ([]byte, []int) { + return fileDescriptor_8f1b1bee37da74c1, []int{5} +} +func (m *DeploymentConfigList) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *DeploymentConfigList) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *DeploymentConfigList) XXX_Merge(src proto.Message) { + xxx_messageInfo_DeploymentConfigList.Merge(m, src) +} +func (m *DeploymentConfigList) XXX_Size() int { + return m.Size() +} +func (m *DeploymentConfigList) XXX_DiscardUnknown() { + xxx_messageInfo_DeploymentConfigList.DiscardUnknown(m) +} + +var xxx_messageInfo_DeploymentConfigList proto.InternalMessageInfo + +func (m *DeploymentConfigRollback) Reset() { *m = DeploymentConfigRollback{} } +func (*DeploymentConfigRollback) ProtoMessage() {} +func (*DeploymentConfigRollback) Descriptor() ([]byte, []int) { + return fileDescriptor_8f1b1bee37da74c1, []int{6} +} +func (m *DeploymentConfigRollback) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *DeploymentConfigRollback) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *DeploymentConfigRollback) XXX_Merge(src proto.Message) { + xxx_messageInfo_DeploymentConfigRollback.Merge(m, src) +} +func (m *DeploymentConfigRollback) XXX_Size() int { + return m.Size() +} +func (m *DeploymentConfigRollback) XXX_DiscardUnknown() { + xxx_messageInfo_DeploymentConfigRollback.DiscardUnknown(m) +} + +var xxx_messageInfo_DeploymentConfigRollback proto.InternalMessageInfo + +func (m *DeploymentConfigRollbackSpec) Reset() { *m = DeploymentConfigRollbackSpec{} } +func (*DeploymentConfigRollbackSpec) ProtoMessage() {} +func (*DeploymentConfigRollbackSpec) Descriptor() ([]byte, []int) { + return fileDescriptor_8f1b1bee37da74c1, []int{7} +} +func (m *DeploymentConfigRollbackSpec) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *DeploymentConfigRollbackSpec) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := 
m.MarshalToSizedBuffer(b)
+	if err != nil {
+		return nil, err
+	}
+	return b[:n], nil
+}
+func (m *DeploymentConfigRollbackSpec) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_DeploymentConfigRollbackSpec.Merge(m, src)
+}
+func (m *DeploymentConfigRollbackSpec) XXX_Size() int {
+	return m.Size()
+}
+func (m *DeploymentConfigRollbackSpec) XXX_DiscardUnknown() {
+	xxx_messageInfo_DeploymentConfigRollbackSpec.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_DeploymentConfigRollbackSpec proto.InternalMessageInfo
+
+func (m *DeploymentConfigSpec) Reset()      { *m = DeploymentConfigSpec{} }
+func (*DeploymentConfigSpec) ProtoMessage() {}
+func (*DeploymentConfigSpec) Descriptor() ([]byte, []int) {
+	return fileDescriptor_8f1b1bee37da74c1, []int{8}
+}
+func (m *DeploymentConfigSpec) XXX_Unmarshal(b []byte) error {
+	return m.Unmarshal(b)
+}
+func (m *DeploymentConfigSpec) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	b = b[:cap(b)]
+	n, err := m.MarshalToSizedBuffer(b)
+	if err != nil {
+		return nil, err
+	}
+	return b[:n], nil
+}
+func (m *DeploymentConfigSpec) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_DeploymentConfigSpec.Merge(m, src)
+}
+func (m *DeploymentConfigSpec) XXX_Size() int {
+	return m.Size()
+}
+func (m *DeploymentConfigSpec) XXX_DiscardUnknown() {
+	xxx_messageInfo_DeploymentConfigSpec.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_DeploymentConfigSpec proto.InternalMessageInfo
+
+func (m *DeploymentConfigStatus) Reset()      { *m = DeploymentConfigStatus{} }
+func (*DeploymentConfigStatus) ProtoMessage() {}
+func (*DeploymentConfigStatus) Descriptor() ([]byte, []int) {
+	return fileDescriptor_8f1b1bee37da74c1, []int{9}
+}
+func (m *DeploymentConfigStatus) XXX_Unmarshal(b []byte) error {
+	return m.Unmarshal(b)
+}
+func (m *DeploymentConfigStatus) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	b = b[:cap(b)]
+	n, err := m.MarshalToSizedBuffer(b)
+	if err != nil {
+		return nil, err
+	}
+	return b[:n], nil
+}
+func (m *DeploymentConfigStatus) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_DeploymentConfigStatus.Merge(m, src)
+}
+func (m *DeploymentConfigStatus) XXX_Size() int {
+	return m.Size()
+}
+func (m *DeploymentConfigStatus) XXX_DiscardUnknown() {
+	xxx_messageInfo_DeploymentConfigStatus.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_DeploymentConfigStatus proto.InternalMessageInfo
+
+func (m *DeploymentDetails) Reset()      { *m = DeploymentDetails{} }
+func (*DeploymentDetails) ProtoMessage() {}
+func (*DeploymentDetails) Descriptor() ([]byte, []int) {
+	return fileDescriptor_8f1b1bee37da74c1, []int{10}
+}
+func (m *DeploymentDetails) XXX_Unmarshal(b []byte) error {
+	return m.Unmarshal(b)
+}
+func (m *DeploymentDetails) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	b = b[:cap(b)]
+	n, err := m.MarshalToSizedBuffer(b)
+	if err != nil {
+		return nil, err
+	}
+	return b[:n], nil
+}
+func (m *DeploymentDetails) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_DeploymentDetails.Merge(m, src)
+}
+func (m *DeploymentDetails) XXX_Size() int {
+	return m.Size()
+}
+func (m *DeploymentDetails) XXX_DiscardUnknown() {
+	xxx_messageInfo_DeploymentDetails.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_DeploymentDetails proto.InternalMessageInfo
+
+func (m *DeploymentLog) Reset()      { *m = DeploymentLog{} }
+func (*DeploymentLog) ProtoMessage() {}
+func (*DeploymentLog) Descriptor() ([]byte, []int) {
+	return fileDescriptor_8f1b1bee37da74c1, []int{11}
+}
+func (m *DeploymentLog) XXX_Unmarshal(b []byte) error {
+	return m.Unmarshal(b)
+}
+func (m *DeploymentLog) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	b = b[:cap(b)]
+	n, err := m.MarshalToSizedBuffer(b)
+	if err != nil {
+		return nil, err
+	}
+	return b[:n], nil
+}
+func (m *DeploymentLog) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_DeploymentLog.Merge(m, src)
+}
+func (m *DeploymentLog) XXX_Size() int {
+	return m.Size()
+}
+func (m *DeploymentLog) XXX_DiscardUnknown() {
+	xxx_messageInfo_DeploymentLog.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_DeploymentLog proto.InternalMessageInfo
+
+func (m *DeploymentLogOptions) Reset()      { *m = DeploymentLogOptions{} }
+func (*DeploymentLogOptions) ProtoMessage() {}
+func (*DeploymentLogOptions) Descriptor() ([]byte, []int) {
+	return fileDescriptor_8f1b1bee37da74c1, []int{12}
+}
+func (m *DeploymentLogOptions) XXX_Unmarshal(b []byte) error {
+	return m.Unmarshal(b)
+}
+func (m *DeploymentLogOptions) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	b = b[:cap(b)]
+	n, err := m.MarshalToSizedBuffer(b)
+	if err != nil {
+		return nil, err
+	}
+	return b[:n], nil
+}
+func (m *DeploymentLogOptions) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_DeploymentLogOptions.Merge(m, src)
+}
+func (m *DeploymentLogOptions) XXX_Size() int {
+	return m.Size()
+}
+func (m *DeploymentLogOptions) XXX_DiscardUnknown() {
+	xxx_messageInfo_DeploymentLogOptions.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_DeploymentLogOptions proto.InternalMessageInfo
+
+func (m *DeploymentRequest) Reset()      { *m = DeploymentRequest{} }
+func (*DeploymentRequest) ProtoMessage() {}
+func (*DeploymentRequest) Descriptor() ([]byte, []int) {
+	return fileDescriptor_8f1b1bee37da74c1, []int{13}
+}
+func (m *DeploymentRequest) XXX_Unmarshal(b []byte) error {
+	return m.Unmarshal(b)
+}
+func (m *DeploymentRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	b = b[:cap(b)]
+	n, err := m.MarshalToSizedBuffer(b)
+	if err != nil {
+		return nil, err
+	}
+	return b[:n], nil
+}
+func (m *DeploymentRequest) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_DeploymentRequest.Merge(m, src)
+}
+func (m *DeploymentRequest) XXX_Size() int {
+	return m.Size()
+}
+func (m *DeploymentRequest) XXX_DiscardUnknown() {
+	xxx_messageInfo_DeploymentRequest.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_DeploymentRequest proto.InternalMessageInfo
+
+func (m *DeploymentStrategy) Reset()      { *m = DeploymentStrategy{} }
+func (*DeploymentStrategy) ProtoMessage() {}
+func (*DeploymentStrategy) Descriptor() ([]byte, []int) {
+	return fileDescriptor_8f1b1bee37da74c1, []int{14}
+}
+func (m *DeploymentStrategy) XXX_Unmarshal(b []byte) error {
+	return m.Unmarshal(b)
+}
+func (m *DeploymentStrategy) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	b = b[:cap(b)]
+	n, err := m.MarshalToSizedBuffer(b)
+	if err != nil {
+		return nil, err
+	}
+	return b[:n], nil
+}
+func (m *DeploymentStrategy) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_DeploymentStrategy.Merge(m, src)
+}
+func (m *DeploymentStrategy) XXX_Size() int {
+	return m.Size()
+}
+func (m *DeploymentStrategy) XXX_DiscardUnknown() {
+	xxx_messageInfo_DeploymentStrategy.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_DeploymentStrategy proto.InternalMessageInfo
+
+func (m *DeploymentTriggerImageChangeParams) Reset() { *m = DeploymentTriggerImageChangeParams{} }
+func (*DeploymentTriggerImageChangeParams) ProtoMessage() {}
+func (*DeploymentTriggerImageChangeParams) Descriptor() ([]byte, []int) {
+	return fileDescriptor_8f1b1bee37da74c1, []int{15}
+}
+func (m *DeploymentTriggerImageChangeParams) XXX_Unmarshal(b []byte) error {
+	return m.Unmarshal(b)
+}
+func (m *DeploymentTriggerImageChangeParams) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	b = b[:cap(b)]
+	n, err := m.MarshalToSizedBuffer(b)
+	if err != nil {
+		return nil, err
+	}
+	return b[:n], nil
+}
+func (m *DeploymentTriggerImageChangeParams) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_DeploymentTriggerImageChangeParams.Merge(m, src)
+}
+func (m *DeploymentTriggerImageChangeParams) XXX_Size() int {
+	return m.Size()
+}
+func (m *DeploymentTriggerImageChangeParams) XXX_DiscardUnknown() {
+	xxx_messageInfo_DeploymentTriggerImageChangeParams.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_DeploymentTriggerImageChangeParams proto.InternalMessageInfo
+
+func (m *DeploymentTriggerPolicies) Reset()      { *m = DeploymentTriggerPolicies{} }
+func (*DeploymentTriggerPolicies) ProtoMessage() {}
+func (*DeploymentTriggerPolicies) Descriptor() ([]byte, []int) {
+	return fileDescriptor_8f1b1bee37da74c1, []int{16}
+}
+func (m *DeploymentTriggerPolicies) XXX_Unmarshal(b []byte) error {
+	return m.Unmarshal(b)
+}
+func (m *DeploymentTriggerPolicies) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	b = b[:cap(b)]
+	n, err := m.MarshalToSizedBuffer(b)
+	if err != nil {
+		return nil, err
+	}
+	return b[:n], nil
+}
+func (m *DeploymentTriggerPolicies) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_DeploymentTriggerPolicies.Merge(m, src)
+}
+func (m *DeploymentTriggerPolicies) XXX_Size() int {
+	return m.Size()
+}
+func (m *DeploymentTriggerPolicies) XXX_DiscardUnknown() {
+	xxx_messageInfo_DeploymentTriggerPolicies.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_DeploymentTriggerPolicies proto.InternalMessageInfo
+
+func (m *DeploymentTriggerPolicy) Reset()      { *m = DeploymentTriggerPolicy{} }
+func (*DeploymentTriggerPolicy) ProtoMessage() {}
+func (*DeploymentTriggerPolicy) Descriptor() ([]byte, []int) {
+	return fileDescriptor_8f1b1bee37da74c1, []int{17}
+}
+func (m *DeploymentTriggerPolicy) XXX_Unmarshal(b []byte) error {
+	return m.Unmarshal(b)
+}
+func (m *DeploymentTriggerPolicy) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	b = b[:cap(b)]
+	n, err := m.MarshalToSizedBuffer(b)
+	if err != nil {
+		return nil, err
+	}
+	return b[:n], nil
+}
+func (m *DeploymentTriggerPolicy) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_DeploymentTriggerPolicy.Merge(m, src)
+}
+func (m *DeploymentTriggerPolicy) XXX_Size() int {
+	return m.Size()
+}
+func (m *DeploymentTriggerPolicy) XXX_DiscardUnknown() {
+	xxx_messageInfo_DeploymentTriggerPolicy.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_DeploymentTriggerPolicy proto.InternalMessageInfo
+
+func (m *ExecNewPodHook) Reset()      { *m = ExecNewPodHook{} }
+func (*ExecNewPodHook) ProtoMessage() {}
+func (*ExecNewPodHook) Descriptor() ([]byte, []int) {
+	return fileDescriptor_8f1b1bee37da74c1, []int{18}
+}
+func (m *ExecNewPodHook) XXX_Unmarshal(b []byte) error {
+	return m.Unmarshal(b)
+}
+func (m *ExecNewPodHook) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	b = b[:cap(b)]
+	n, err := m.MarshalToSizedBuffer(b)
+	if err != nil {
+		return nil, err
+	}
+	return b[:n], nil
+}
+func (m *ExecNewPodHook) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_ExecNewPodHook.Merge(m, src)
+}
+func (m *ExecNewPodHook) XXX_Size() int {
+	return m.Size()
+}
+func (m *ExecNewPodHook) XXX_DiscardUnknown() {
+	xxx_messageInfo_ExecNewPodHook.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_ExecNewPodHook proto.InternalMessageInfo
+
+func (m *LifecycleHook) Reset()      { *m = LifecycleHook{} }
+func (*LifecycleHook) ProtoMessage() {}
+func (*LifecycleHook) Descriptor() ([]byte, []int) {
+	return fileDescriptor_8f1b1bee37da74c1, []int{19}
+}
+func (m *LifecycleHook) XXX_Unmarshal(b []byte) error {
+	return m.Unmarshal(b)
+}
+func (m *LifecycleHook) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	b = b[:cap(b)]
+	n, err := m.MarshalToSizedBuffer(b)
+	if err != nil {
+		return nil, err
+	}
+	return b[:n], nil
+}
+func (m *LifecycleHook) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_LifecycleHook.Merge(m, src)
+}
+func (m *LifecycleHook) XXX_Size() int {
+	return m.Size()
+}
+func (m *LifecycleHook) XXX_DiscardUnknown() {
+	xxx_messageInfo_LifecycleHook.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_LifecycleHook proto.InternalMessageInfo
+
+func (m *RecreateDeploymentStrategyParams) Reset() { *m = RecreateDeploymentStrategyParams{} }
+func (*RecreateDeploymentStrategyParams) ProtoMessage() {}
+func (*RecreateDeploymentStrategyParams) Descriptor() ([]byte, []int) {
+	return fileDescriptor_8f1b1bee37da74c1, []int{20}
+}
+func (m *RecreateDeploymentStrategyParams) XXX_Unmarshal(b []byte) error {
+	return m.Unmarshal(b)
+}
+func (m *RecreateDeploymentStrategyParams) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	b = b[:cap(b)]
+	n, err := m.MarshalToSizedBuffer(b)
+	if err != nil {
+		return nil, err
+	}
+	return b[:n], nil
+}
+func (m *RecreateDeploymentStrategyParams) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_RecreateDeploymentStrategyParams.Merge(m, src)
+}
+func (m *RecreateDeploymentStrategyParams) XXX_Size() int {
+	return m.Size()
+}
+func (m *RecreateDeploymentStrategyParams) XXX_DiscardUnknown() {
+	xxx_messageInfo_RecreateDeploymentStrategyParams.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_RecreateDeploymentStrategyParams proto.InternalMessageInfo
+
+func (m *RollingDeploymentStrategyParams) Reset() { *m = RollingDeploymentStrategyParams{} }
+func (*RollingDeploymentStrategyParams) ProtoMessage() {}
+func (*RollingDeploymentStrategyParams) Descriptor() ([]byte, []int) {
+	return fileDescriptor_8f1b1bee37da74c1, []int{21}
+}
+func (m *RollingDeploymentStrategyParams) XXX_Unmarshal(b []byte) error {
+	return m.Unmarshal(b)
+}
+func (m *RollingDeploymentStrategyParams) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	b = b[:cap(b)]
+	n, err := m.MarshalToSizedBuffer(b)
+	if err != nil {
+		return nil, err
+	}
+	return b[:n], nil
+}
+func (m *RollingDeploymentStrategyParams) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_RollingDeploymentStrategyParams.Merge(m, src)
+}
+func (m *RollingDeploymentStrategyParams) XXX_Size() int {
+	return m.Size()
+}
+func (m *RollingDeploymentStrategyParams) XXX_DiscardUnknown() {
+	xxx_messageInfo_RollingDeploymentStrategyParams.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_RollingDeploymentStrategyParams proto.InternalMessageInfo
+
+func (m *TagImageHook) Reset()      { *m = TagImageHook{} }
+func (*TagImageHook) ProtoMessage() {}
+func (*TagImageHook) Descriptor() ([]byte, []int) {
+	return fileDescriptor_8f1b1bee37da74c1, []int{22}
+}
+func (m *TagImageHook) XXX_Unmarshal(b []byte) error {
+	return m.Unmarshal(b)
+}
+func (m *TagImageHook) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	b = b[:cap(b)]
+	n, err := m.MarshalToSizedBuffer(b)
+	if err != nil {
+		return nil, err
+	}
+	return b[:n], nil
+}
+func (m *TagImageHook) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_TagImageHook.Merge(m, src)
+}
+func (m *TagImageHook) XXX_Size() int {
+	return m.Size()
+}
+func (m *TagImageHook) XXX_DiscardUnknown() {
+	xxx_messageInfo_TagImageHook.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_TagImageHook proto.InternalMessageInfo
+
+func init() {
+	proto.RegisterType((*CustomDeploymentStrategyParams)(nil), "github.com.openshift.api.apps.v1.CustomDeploymentStrategyParams")
+	proto.RegisterType((*DeploymentCause)(nil), "github.com.openshift.api.apps.v1.DeploymentCause")
+	proto.RegisterType((*DeploymentCauseImageTrigger)(nil), "github.com.openshift.api.apps.v1.DeploymentCauseImageTrigger")
+	proto.RegisterType((*DeploymentCondition)(nil), "github.com.openshift.api.apps.v1.DeploymentCondition")
+	proto.RegisterType((*DeploymentConfig)(nil), "github.com.openshift.api.apps.v1.DeploymentConfig")
+	proto.RegisterType((*DeploymentConfigList)(nil), "github.com.openshift.api.apps.v1.DeploymentConfigList")
+	proto.RegisterType((*DeploymentConfigRollback)(nil), "github.com.openshift.api.apps.v1.DeploymentConfigRollback")
+	proto.RegisterMapType((map[string]string)(nil), "github.com.openshift.api.apps.v1.DeploymentConfigRollback.UpdatedAnnotationsEntry")
+	proto.RegisterType((*DeploymentConfigRollbackSpec)(nil), "github.com.openshift.api.apps.v1.DeploymentConfigRollbackSpec")
+	proto.RegisterType((*DeploymentConfigSpec)(nil), "github.com.openshift.api.apps.v1.DeploymentConfigSpec")
+	proto.RegisterMapType((map[string]string)(nil), "github.com.openshift.api.apps.v1.DeploymentConfigSpec.SelectorEntry")
+	proto.RegisterType((*DeploymentConfigStatus)(nil), "github.com.openshift.api.apps.v1.DeploymentConfigStatus")
+	proto.RegisterType((*DeploymentDetails)(nil), "github.com.openshift.api.apps.v1.DeploymentDetails")
+	proto.RegisterType((*DeploymentLog)(nil), "github.com.openshift.api.apps.v1.DeploymentLog")
+	proto.RegisterType((*DeploymentLogOptions)(nil), "github.com.openshift.api.apps.v1.DeploymentLogOptions")
+	proto.RegisterType((*DeploymentRequest)(nil), "github.com.openshift.api.apps.v1.DeploymentRequest")
+	proto.RegisterType((*DeploymentStrategy)(nil), "github.com.openshift.api.apps.v1.DeploymentStrategy")
+	proto.RegisterMapType((map[string]string)(nil), "github.com.openshift.api.apps.v1.DeploymentStrategy.AnnotationsEntry")
+	proto.RegisterMapType((map[string]string)(nil), "github.com.openshift.api.apps.v1.DeploymentStrategy.LabelsEntry")
+	proto.RegisterType((*DeploymentTriggerImageChangeParams)(nil), "github.com.openshift.api.apps.v1.DeploymentTriggerImageChangeParams")
+	proto.RegisterType((*DeploymentTriggerPolicies)(nil), "github.com.openshift.api.apps.v1.DeploymentTriggerPolicies")
+	proto.RegisterType((*DeploymentTriggerPolicy)(nil), "github.com.openshift.api.apps.v1.DeploymentTriggerPolicy")
+	proto.RegisterType((*ExecNewPodHook)(nil), "github.com.openshift.api.apps.v1.ExecNewPodHook")
+	proto.RegisterType((*LifecycleHook)(nil), "github.com.openshift.api.apps.v1.LifecycleHook")
+	proto.RegisterType((*RecreateDeploymentStrategyParams)(nil), "github.com.openshift.api.apps.v1.RecreateDeploymentStrategyParams")
+	proto.RegisterType((*RollingDeploymentStrategyParams)(nil), "github.com.openshift.api.apps.v1.RollingDeploymentStrategyParams")
+	proto.RegisterType((*TagImageHook)(nil), "github.com.openshift.api.apps.v1.TagImageHook")
+}
+
+func init() {
+	proto.RegisterFile("github.com/openshift/api/apps/v1/generated.proto", fileDescriptor_8f1b1bee37da74c1)
+}
+
+var fileDescriptor_8f1b1bee37da74c1 = []byte{
+	// 2520 bytes of a gzipped FileDescriptorProto
+	0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xac, 0x59, 0xcd, 0x6f, 0x5b, 0x59,
+	0x15, 0xcf, 0x8b, 0xed, 0xc4, 0x3e, 0xf9, 0x6a, 0x6e, 0xfa, 0xe1, 0xc9, 0xa0, 0x38, 0xf2, 0x68,
+	0x86, 0x00, 0x83, 0x3d, 0xcd, 0x94, 0xd1, 0xb4, 0xd5, 0x0c, 0xc4, 0x69, 0x3a, 0x93, 0xca, 0x69,
+	0xc3, 0x4d, 0xda, 0xd2, 0x0a, 0x41, 0x6f, 0x9e, 0x6f, 0x9c, 0x3b, 0x79, 0xef, 0x5d, 0xf3, 0xde,
+	0xb5, 0x5b, 0x23, 0x84, 0x66, 0x03, 0x12, 0xd2, 0x2c, 0x58, 0xc2, 0x06, 0xb1, 0x60, 0x0b, 0x62,
+	0xc1, 0x1e, 0xb1, 0x40, 0xea, 0x02, 0xa4, 0x91, 0x90, 0x60, 0x84, 0x50, 0x34, 0x0d, 0x3b, 0xfe,
+	0x84, 0xae, 0xd0, 0xfd, 0x78, 0x5f, 0xfe, 0x68, 0xe2, 0xb4, 0x3b, 0xbf, 0xf3, 0xf1, 0x3b, 0xe7,
+	0x9e, 0x7b, 0xce, 0xb9, 0xe7, 0x5e, 0xc3, 0x3b, 0x4d, 0x26, 0x0e, 0xda, 0x7b, 0x15, 0x9b, 0xbb,
+	0x55, 0xde, 0xa2, 0x5e, 0x70, 0xc0, 0xf6, 0x45, 0x95, 0xb4, 0x58, 0x95, 0xb4, 0x5a, 0x41, 0xb5,
+	0x73, 0xb9, 0xda, 0xa4, 0x1e, 0xf5, 0x89, 0xa0, 0x8d, 0x4a, 0xcb, 0xe7, 0x82, 0xa3, 0xe5, 0x58,
+	0xa3, 0x12, 0x69, 0x54, 0x48, 0x8b, 0x55, 0xa4, 0x46, 0xa5, 0x73, 0x79, 0xf1, 0x9b, 0x09, 0xcc,
+	0x26, 0x6f, 0xf2, 0xaa, 0x52, 0xdc, 0x6b, 0xef, 0xab, 0x2f, 0xf5, 0xa1, 0x7e, 0x69, 0xc0, 0xc5,
+	0xf2, 0xe1, 0xfb, 0x41, 0x85, 0x71, 0x65, 0xd4, 0xe6, 0x3e, 0x1d, 0x60, 0x74, 0xf1, 0x4a, 0x2c,
+	0xe3, 0x12, 0xfb, 0x80, 0x79, 0xd4, 0xef, 0x56, 0x5b, 0x87, 0x4d, 0x49, 0x08, 0xaa, 0x2e, 0x15,
+	0x64, 0x90, 0xd6, 0x7b, 0xc3, 0xb4, 0xfc, 0xb6, 0x27, 0x98, 0x4b, 0xab, 0x81, 0x7d, 0x40, 0x5d,
+	0xd2, 0xa7, 0xf7, 0xee, 0x30, 0xbd, 0xb6, 0x60, 0x4e, 0x95, 0x79, 0x22, 0x10, 0x7e, 0xaf, 0x52,
+	0xf9, 0xcf, 0x16, 0x2c, 0xad, 0xb7, 0x03, 0xc1, 0xdd, 0x1b, 0xb4, 0xe5, 0xf0, 0xae, 0x4b, 0x3d,
+	0xb1, 0x23, 0xa4, 0x44, 0xb3, 0xbb, 0x4d, 0x7c, 0xe2, 0x06, 0xe8, 0x0d, 0xc8, 0x31, 0x97, 0x34,
+	0x69, 0xd1, 0x5a, 0xb6, 0x56, 0x0a, 0xb5, 0x99, 0xa7, 0x47, 0xa5, 0xb1, 0xe3, 0xa3, 0x52, 0x6e,
+	0x53, 0x12, 0xb1, 0xe6, 0xa1, 0xef, 0xc2, 0x14, 0xf5, 0x3a, 0xcc, 0xe7, 0x9e, 0x44, 0x28, 0x8e,
+	0x2f, 0x67, 0x56, 0xa6, 0x56, 0x17, 0x2b, 0xda, 0x25, 0x15, 0x67, 0x19, 0xa4, 0x4a, 0xe7, 0x72,
+	0x65, 0xc3, 0xeb, 0xdc, 0x23, 0x7e, 0x6d, 0xc1, 0xc0, 0x4c, 0x6d, 0xc4, 0x6a, 0x38, 0x89, 0x81,
+	0xde, 0x84, 0x49, 0x9b, 0xbb, 0x2e, 0xf1, 0x1a, 0xc5, 0xcc, 0x72, 0x66, 0xa5, 0x50, 0x9b, 0x3a,
+	0x3e, 0x2a, 0x4d, 0xae, 0x6b, 0x12, 0x0e, 0x79, 0xe5, 0xbf, 0x58, 0x30, 0x17, 0xfb, 0xbe, 0x4e,
+	0xda, 0x01, 0x45, 0x57, 0x21, 0x2b, 0xba, 0xad, 0xd0, 0xe3, 0x37, 0x8d, 0xa9, 0xec, 0x6e, 0xb7,
+	0x45, 0x9f, 0x1f, 0x95, 0x2e, 0xc4, 0xe2, 0xbb, 0x3e, 0x6b, 0x36, 0xa9, 0x2f, 0x19, 0x58, 0xa9,
+	0xa0, 0x00, 0xa6, 0xd5, 0x8a, 0x0c, 0xa7, 0x38, 0xbe, 0x6c, 0xad, 0x4c, 0xad, 0x7e, 0x50, 0x39,
+	0x29, 0x7f, 0x2a, 0x3d, 0x3e, 0x6c, 0x26, 0x40, 0x6a, 0xe7, 0x8e, 0x8f, 0x4a, 0xd3, 0x49, 0x0a,
+	0x4e, 0x19, 0x29, 0x37, 0xe0, 0xf5, 0x17, 0xa8, 0xa3, 0x0d, 0xc8, 0xee, 0xfb, 0xdc, 0x55, 0xcb,
+	0x99, 0x5a, 0x7d, 0x63, 0x50, 0x54, 0xef, 0xec, 0x7d, 0x42, 0x6d, 0x81, 0xe9, 0x3e, 0xf5, 0xa9,
+	0x67, 0xd3, 0xda, 0x74, 0xb8, 0xe6, 0x9b, 0x3e, 0x77, 0xb1, 0x52, 0x2f, 0xff, 0x2b, 0x03, 0x0b,
+	0x09, 0x33, 0xdc, 0x6b, 0x30, 0xc1, 0xb8, 0x87, 0xae, 0xa7, 0xa2, 0xf5, 0xd5, 0x9e, 0x68, 0x5d,
+	0x1a, 0xa0, 0x92, 0x88, 0x57, 0x1d, 0x26, 0x02, 0x41, 0x44, 0x3b, 0x50, 0x91, 0x2a, 0xd4, 0xae,
+	0x18, 0xf5, 0x89, 0x1d, 0x45, 0x7d, 0x7e, 0x54, 0x1a, 0x50, 0x29, 0x95, 0x08, 0x49, 0x4b, 0x61,
+	0x83, 0x81, 0x3e, 0x81, 0x59, 0x87, 0x04, 0xe2, 0x6e, 0xab, 0x41, 0x04, 0xdd, 0x65, 0x2e, 0x2d,
+	0x4e, 0xa8, 0x35, 0x7f, 0x3d, 0xb1, 0xe6, 0x28, 0xb9, 0x2b, 0xad, 0xc3, 0xa6, 0x24, 0x04, 0x15,
+	0x59, 0x4a, 0x32, 0x0a, 0x52, 0xa3, 0x76, 0xd1, 0x78, 0x30, 0x5b, 0x4f, 0x21, 0xe1, 0x1e, 0x64,
+	0xd4, 0x01, 0x24, 0x29, 0xbb, 0x3e, 0xf1, 0x02, 0xbd, 0x2a, 0x69, 0x2f, 0x33, 0xb2, 0xbd, 0x45,
+	0x63, 0x0f, 0xd5, 0xfb, 0xd0, 0xf0, 0x00, 0x0b, 0xe8, 0x2d, 0x98, 0xf0, 0x29, 0x09, 0xb8, 0x57,
+	0xcc, 0xaa, 0x88, 0xcd, 0x86, 0x11, 0xc3, 0x8a, 0x8a, 0x0d, 0x17, 0x7d, 0x0d, 0x26, 0x5d, 0x1a,
+	0x04, 0xb2, 0xf2, 0x72, 0x4a, 0x70, 0xce, 0x08, 0x4e, 0x6e, 0x69, 0x32, 0x0e, 0xf9, 0xe5, 0x3f,
+	0x8e, 0xc3, 0xb9, 0xd4, 0x36, 0xed, 0xb3, 0x26, 0x7a, 0x04, 0x79, 0xe9, 0x67, 0x83, 0x08, 0x62,
+	0x32, 0xe7, 0x9d, 0xd3, 0xad, 0x4a, 0xe7, 0xd2, 0x16, 0x15, 0xa4, 0x86, 0x8c, 0x49, 0x88, 0x69,
+	0x38, 0x42, 0x45, 0xdf, 0x83, 0x6c, 0xd0, 0xa2, 0xb6, 0xa9, 0x91, 0xf7, 0x46, 0xaa, 0x11, 0xe5,
+	0xe3, 0x4e, 0x8b, 0xda, 0x71, 0xaa, 0xca, 0x2f, 0xac, 0x10, 0xd1, 0xa3, 0x28, 0xab, 0xf4, 0x7e,
+	0xbc, 0x7f, 0x06, 0x6c, 0xa5, 0x1f, 0x47, 0x37, 0x9d, 0x69, 0xe5, 0xbf, 0x5b, 0x70, 0xbe, 0x57,
+	0xa5, 0xce, 0x02, 0x81, 0xbe, 0xdf, 0x17, 0xb6, 0xca, 0xe9, 0xc2, 0x26, 0xb5, 0x55, 0xd0, 0xce,
+	0x19, 0x93, 0xf9, 0x90, 0x92, 0x08, 0xd9, 0x7d, 0xc8, 0x31, 0x41, 0xdd, 0xc0, 0x74, 0xc8, 0xd5,
+	0xd1, 0xd7, 0x95, 0x68, 0xc0, 0x12, 0x08, 0x6b, 0xbc, 0xf2, 0xcf, 0x33, 0x50, 0xec, 0x15, 0xc5,
+	0xdc, 0x71, 0xf6, 0x88, 0x7d, 0x88, 0x96, 0x21, 0xeb, 0x11, 0x37, 0xac, 0xf0, 0x28, 0xe0, 0xb7,
+	0x89, 0x4b, 0xb1, 0xe2, 0xa0, 0xdf, 0x58, 0x80, 0xda, 0xaa, 0x36, 0x1a, 0x6b, 0x9e, 0xc7, 0x05,
+	0x91, 0xe9, 0x1a, 0x7a, 0x89, 0x47, 0xf7, 0x32, 0x34, 0x5d, 0xb9, 0xdb, 0x07, 0xba, 0xe1, 0x09,
+	0xbf, 0x1b, 0x57, 0x4d, 0xbf, 0x00, 0x1e, 0xe0, 0x09, 0x7a, 0x64, 0x72, 0x4d, 0xe7, 0xc3, 0x87,
+	0x67, 0xf7, 0x68, 0x58, 0xce, 0x2d, 0x6e, 0xc0, 0xa5, 0x21, 0xce, 0xa2, 0x73, 0x90, 0x39, 0xa4,
+	0x5d, 0x1d, 0x3e, 0x2c, 0x7f, 0xa2, 0xf3, 0x90, 0xeb, 0x10, 0xa7, 0x4d, 0x75, 0xd7, 0xc3, 0xfa,
+	0xe3, 0xda, 0xf8, 0xfb, 0x56, 0xf9, 0x4f, 0x19, 0xf8, 0xca, 0x8b, 0x6c, 0xbf, 0xa2, 0x6e, 0x8e,
+	0xde, 0x86, 0xbc, 0x4f, 0x3b, 0x2c, 0x60, 0xdc, 0x53, 0x4e, 0x64, 0xe2, 0xbc, 0xc3, 0x86, 0x8e,
+	0x23, 0x09, 0xb4, 0x06, 0x73, 0xcc, 0xb3, 0x9d, 0x76, 0x23, 0x3c, 0x54, 0x74, 0x65, 0xe5, 0x6b,
+	0x97, 0x8c, 0xd2, 0xdc, 0x66, 0x9a, 0x8d, 0x7b, 0xe5, 0x93, 0x10, 0xd4, 0x6d, 0x39, 0x44, 0x50,
+	0xd5, 0xc0, 0x06, 0x40, 0x18, 0x36, 0xee, 0x95, 0x47, 0xf7, 0xe0, 0xa2, 0x21, 0x61, 0xda, 0x72,
+	0x98, 0xad, 0x62, 0x2c, 0x2b, 0x44, 0x75, 0xb8, 0x7c, 0x6d, 0xc9, 0x20, 0x5d, 0xdc, 0x1c, 0x28,
+	0x85, 0x87, 0x68, 0x27, 0x5c, 0x0b, 0x67, 0x17, 0x75, 0x6e, 0xf4, 0xbb, 0x16, 0xb2, 0x71, 0xaf,
+	0x7c, 0xf9, 0x7f, 0xb9, 0xfe, 0x7e, 0xa0, 0xb6, 0x6b, 0x0f, 0xf2, 0x41, 0x08, 0xaa, 0xb7, 0xec,
+	0xca, 0x28, 0xc9, 0x17, 0x1a, 0x88, 0x77, 0x27, 0xf2, 0x21, 0xc2, 0x95, 0xfe, 0xbb, 0xcc, 0xc3,
+	0x94, 0x34, 0xba, 0x3b, 0xd4, 0xe6, 0x5e, 0x23, 0x28, 0x16, 0x96, 0xad, 0x95, 0x5c, 0xec, 0xff,
+	0x56, 0x9a, 0x8d, 0x7b, 0xe5, 0x11, 0x85, 0xbc, 0x08, 0x77, 0x56, 0xf7, 0xe3, 0xeb, 0xa3, 0xb8,
+	0x69, 0x76, 0x79, 0x9b, 0x3b, 0xcc, 0x66, 0x34, 0xa8, 0x4d, 0x4b, 0x4f, 0xa3, 0x5c, 0x88, 0xa0,
+	0x75, 0xd6, 0xa9, 0xe0, 0xeb, 0x04, 0xca, 0x25, 0xb3, 0x4e, 0xd3, 0x71, 0x24, 0x81, 0xea, 0x70,
+	0x3e, 0xcc, 0xc0, 0x8f, 0x59, 0x20, 0xb8, 0xdf, 0xad, 0x33, 0x97, 0x09, 0x95, 0x37, 0xb9, 0x5a,
+	0xf1, 0xf8, 0xa8, 0x74, 0x1e, 0x0f, 0xe0, 0xe3, 0x81, 0x5a, 0xb2, 0x8b, 0x09, 0x1a, 0x08, 0x93,
+	0x2b, 0x51, 0x4d, 0xec, 0xd2, 0x40, 0x60, 0xc5, 0x91, 0x47, 0x6b, 0x4b, 0x4e, 0x4f, 0x0d, 0xb3,
+	0xfd, 0x51, 0xf3, 0xdf, 0x56, 0x54, 0x6c, 0xb8, 0xc8, 0x87, 0x7c, 0x40, 0x1d, 0x6a, 0x0b, 0xee,
+	0x17, 0x27, 0x55, 0x8b, 0xbb, 0x71, 0xb6, 0xc3, 0xab, 0xb2, 0x63, 0x60, 0x74, 0x53, 0x8b, 0xf7,
+	0xd8, 0x90, 0x71, 0x64, 0x07, 0x6d, 0x41, 0x5e, 0x84, 0x75, 0x93, 0x1f, 0x5e, 0xfa, 0xdb, 0xbc,
+	0x11, 0x96, 0x8b, 0xee, 0x54, 0x6a, 0x23, 0xc2, 0x8a, 0x8a, 0x20, 0x16, 0xaf, 0xc3, 0x4c, 0xca,
+	0xf6, 0x48, 0x3d, 0xea, 0x0f, 0x39, 0xb8, 0x38, 0xf8, 0xbc, 0x44, 0xd7, 0x61, 0x46, 0xe2, 0x07,
+	0xe2, 0x1e, 0xf5, 0x55, 0x6f, 0xb1, 0x54, 0x6f, 0xb9, 0x60, 0x56, 0x36, 0x53, 0x4f, 0x32, 0x71,
+	0x5a, 0x16, 0xdd, 0x02, 0xc4, 0xf7, 0x02, 0xea, 0x77, 0x68, 0xe3, 0x23, 0x7d, 0xd1, 0x88, 0xbb,
+	0x53, 0xd4, 0xf0, 0xef, 0xf4, 0x49, 0xe0, 0x01, 0x5a, 0x23, 0x66, 0xda, 0x1a, 0xcc, 0x99, 0x43,
+	0x23, 0x64, 0x9a, 0x24, 0x8b, 0x2a, 0xe8, 0x6e, 0x9a, 0x8d, 0x7b, 0xe5, 0xd1, 0x47, 0x30, 0x4f,
+	0x3a, 0x84, 0x39, 0x64, 0xcf, 0xa1, 0x11, 0x48, 0x4e, 0x81, 0xbc, 0x66, 0x40, 0xe6, 0xd7, 0x7a,
+	0x05, 0x70, 0xbf, 0x0e, 0xda, 0x82, 0x85, 0xb6, 0xd7, 0x0f, 0x35, 0xa1, 0xa0, 0x5e, 0x37, 0x50,
+	0x0b, 0x77, 0xfb, 0x45, 0xf0, 0x20, 0x3d, 0xf4, 0x10, 0x26, 0x1b, 0x54, 0x10, 0xe6, 0x04, 0xc5,
+	0x49, 0x95, 0x37, 0xef, 0x8e, 0x92, 0xab, 0x37, 0xb4, 0xaa, 0xbe, 0x3c, 0x99, 0x0f, 0x1c, 0x02,
+	0x22, 0x06, 0x60, 0x87, 0xa3, 0x78, 0x50, 0xcc, 0xab, 0x52, 0xf8, 0xd6, 0x88, 0xa5, 0xa0, 0xb5,
+	0xe3, 0x51, 0x31, 0x22, 0x05, 0x38, 0x01, 0x2e, 0x13, 0xcb, 0x97, 0x0d, 0x2b, 0x8a, 0x87, 0xee,
+	0x70, 0x51, 0x62, 0xe1, 0x24, 0x13, 0xa7, 0x65, 0xcb, 0xbf, 0xb6, 0x60, 0xbe, 0x6f, 0x4d, 0xc9,
+	0x09, 0xd9, 0x7a, 0xf1, 0x84, 0x8c, 0x1e, 0xc0, 0x84, 0x2d, 0x6b, 0x3f, 0x1c, 0x69, 0x2e, 0x8f,
+	0x7c, 0xa1, 0x8b, 0x9b, 0x89, 0xfa, 0x0c, 0xb0, 0x01, 0x2c, 0xcf, 0xc1, 0x4c, 0x2c, 0x5a, 0xe7,
+	0xcd, 0xf2, 0x67, 0xd9, 0xe4, 0x51, 0x52, 0xe7, 0xcd, 0x3b, 0x2d, 0x1d, 0x82, 0x2a, 0x14, 0x6c,
+	0xee, 0x09, 0x22, 0x07, 0x48, 0xe3, 0xf1, 0xbc, 0x01, 0x2d, 0xac, 0x87, 0x0c, 0x1c, 0xcb, 0xc8,
+	0x7e, 0xb6, 0xcf, 0x1d, 0x87, 0x3f, 0x56, 0x35, 0x94, 0xe8, 0x67, 0x37, 0x15, 0x15, 0x1b, 0xae,
+	0xac, 0x95, 0x96, 0x6c, 0x99, 0xbc, 0x1d, 0x1e, 0xeb, 0x51, 0xad, 0x6c, 0x1b, 0x3a, 0x8e, 0x24,
+	0xd0, 0x15, 0x98, 0x0e, 0x98, 0x67, 0xd3, 0xf0, 0xa8, 0xc9, 0xea, 0xe9, 0x41, 0xde, 0x51, 0x77,
+	0x12, 0x74, 0x9c, 0x92, 0x42, 0xf7, 0xa1, 0xa0, 0xbe, 0xd5, 0x2d, 0x29, 0x37, 0xf2, 0x2d, 0x69,
+	0x46, 0x2e, 0x72, 0x27, 0x04, 0xc0, 0x31, 0x16, 0x5a, 0x05, 0x10, 0xcc, 0xa5, 0x81, 0x20, 0x6e,
+	0x2b, 0x30, 0x8d, 0x3b, 0x4a, 0xa6, 0xdd, 0x88, 0x83, 0x13, 0x52, 0xe8, 0x1b, 0x50, 0x90, 0x29,
+	0x50, 0x67, 0x1e, 0xd5, 0x55, 0x91, 0xd1, 0x06, 0x76, 0x43, 0x22, 0x8e, 0xf9, 0xa8, 0x02, 0xe0,
+	0xc8, 0x03, 0xa4, 0xd6, 0x15, 0x34, 0x50, 0xbd, 0x37, 0x53, 0x9b, 0x95, 0xe0, 0xf5, 0x88, 0x8a,
+	0x13, 0x12, 0x32, 0xea, 0x1e, 0x7f, 0x4c, 0x98, 0x50, 0x29, 0x9a, 0x88, 0xfa, 0x6d, 0x7e, 0x9f,
+	0x30, 0x81, 0x0d, 0x17, 0xbd, 0x09, 0x93, 0x1d, 0xd3, 0x24, 0x41, 0x81, 0xaa, 0x1a, 0x0b, 0x5b,
+	0x63, 0xc8, 0x2b, 0xff, 0x3b, 0x95, 0xbb, 0x98, 0xfe, 0xa8, 0x2d, 0x8f, 0xaa, 0x93, 0x47, 0xf2,
+	0xb7, 0x60, 0x42, 0x77, 0xd7, 0xde, 0xcd, 0xd7, 0x2d, 0x18, 0x1b, 0x2e, 0x7a, 0x03, 0x72, 0xfb,
+	0xdc, 0xb7, 0xa9, 0xd9, 0xf9, 0xe8, 0x7a, 0x70, 0x53, 0x12, 0xb1, 0xe6, 0xa1, 0x7b, 0x30, 0x47,
+	0x9f, 0xa4, 0xe7, 0xbf, 0xac, 0x7a, 0x54, 0x79, 0x5b, 0xf6, 0xc6, 0x8d, 0x34, 0x6b, 0xf8, 0x1b,
+	0x49, 0x2f, 0x48, 0xf9, 0x1f, 0x93, 0x80, 0xfa, 0x87, 0x1d, 0x74, 0x2d, 0xf5, 0xa4, 0xf0, 0x56,
+	0xcf, 0x93, 0xc2, 0xc5, 0x7e, 0x8d, 0xc4, 0x8b, 0x42, 0x07, 0xa6, 0x6d, 0xf5, 0x22, 0xa5, 0xdf,
+	0x9f, 0xcc, 0x34, 0xf3, 0x9d, 0x93, 0x0b, 0xf6, 0xc5, 0xef, 0x58, 0x3a, 0xc1, 0xd7, 0x13, 0xc8,
+	0x38, 0x65, 0x07, 0xfd, 0x14, 0x66, 0x7d, 0x6a, 0xfb, 0x94, 0x08, 0x6a, 0x2c, 0xeb, 0xbb, 0x46,
+	0xed, 0x64, 0xcb, 0xd8, 0xe8, 0x0d, 0xb5, 0x8d, 0x8e, 0x8f, 0x4a, 0xb3, 0x38, 0x85, 0x8e, 0x7b,
+	0xac, 0xa1, 0x1f, 0xc3, 0x8c, 0xcf, 0x1d, 0x87, 0x79, 0x4d, 0x63, 0x3e, 0xab, 0xcc, 0xaf, 0x9d,
+	0xc2, 0xbc, 0x56, 0x1b, 0x6a, 0x7d, 0x5e, 0xf5, 0xd7, 0x24, 0x36, 0x4e, 0x9b, 0x42, 0x0f, 0xa0,
+	0xe0, 0xd3, 0x80, 0xb7, 0x7d, 0x9b, 0x06, 0xa6, 0xb8, 0x57, 0x06, 0x4d, 0x27, 0xd8, 0x08, 0xc9,
+	0x2c, 0x66, 0x3e, 0x95, 0xb6, 0x82, 0xb8, 0x87, 0x85, 0xdc, 0x00, 0xc7, 0x68, 0xe8, 0x40, 0xa6,
+	0xf1, 0x1e, 0x75, 0x64, 0x69, 0x67, 0x4e, 0xb7, 0x91, 0xfd, 0x0b, 0xa9, 0xd4, 0x15, 0x84, 0x9e,
+	0xb2, 0x12, 0x85, 0x20, 0x89, 0xd8, 0xe0, 0xa3, 0x9f, 0xc0, 0x14, 0x49, 0xdc, 0x5d, 0xf5, 0x60,
+	0xb7, 0x71, 0x26, 0x73, 0x7d, 0xd7, 0xd5, 0xe8, 0xb9, 0x32, 0x79, 0x4f, 0x4d, 0x9a, 0x43, 0x77,
+	0xe0, 0x02, 0xb1, 0x05, 0xeb, 0xd0, 0x1b, 0x94, 0x34, 0x1c, 0xe6, 0x45, 0xed, 0x55, 0x37, 0x9c,
+	0xd7, 0x8e, 0x8f, 0x4a, 0x17, 0xd6, 0x06, 0x09, 0xe0, 0xc1, 0x7a, 0x8b, 0x57, 0x61, 0x2a, 0xb1,
+	0xea, 0x51, 0xe6, 0xbb, 0xc5, 0x0f, 0xe1, 0xdc, 0x4b, 0xdd, 0x61, 0x7f, 0x37, 0x0e, 0xe5, 0xbe,
+	0x06, 0xa0, 0x9e, 0x24, 0xd7, 0x0f, 0x88, 0xd7, 0x0c, 0x33, 0xb6, 0x0a, 0x05, 0xd2, 0x16, 0xdc,
+	0x25, 0x82, 0xd9, 0x0a, 0x38, 0x1f, 0xe7, 0xc2, 0x5a, 0xc8, 0xc0, 0xb1, 0x0c, 0xba, 0x06, 0xb3,
+	0xd1, 0xe1, 0x26, 0x3b, 0x9d, 0x3e, 0x8d, 0x0b, 0xba, 0x3c, 0xd6, 0x53, 0x1c, 0xdc, 0x23, 0x19,
+	0x5d, 0x9b, 0x33, 0x2f, 0x77, 0x6d, 0xbe, 0x15, 0xbe, 0xfa, 0xa9, 0x35, 0xd1, 0x86, 0x5a, 0x95,
+	0x79, 0x89, 0xeb, 0x79, 0xc9, 0x4b, 0x4a, 0xe0, 0x01, 0x5a, 0xe5, 0x9f, 0x59, 0xf0, 0xda, 0xd0,
+	0x2b, 0x14, 0xfa, 0x41, 0xf8, 0xd4, 0x63, 0xa9, 0x44, 0xbc, 0x7a, 0xd6, 0xeb, 0x58, 0x77, 0xf0,
+	0x8b, 0xcf, 0xb5, 0xfc, 0xaf, 0x7e, 0x5b, 0x1a, 0xfb, 0xf4, 0x3f, 0xcb, 0x63, 0xe5, 0x2f, 0x2d,
+	0xb8, 0x34, 0x44, 0xf7, 0x65, 0x9e, 0xc2, 0x7f, 0x61, 0xc1, 0x3c, 0xeb, 0xdd, 0x74, 0xd3, 0x8e,
+	0x6f, 0x9c, 0x61, 0x35, 0x7d, 0x09, 0x54, 0xbb, 0x20, 0x67, 0xea, 0x3e, 0x32, 0xee, 0xb7, 0x5a,
+	0xfe, 0xa7, 0x05, 0xb3, 0x1b, 0x4f, 0xa8, 0x7d, 0x9b, 0x3e, 0xde, 0xe6, 0x8d, 0x8f, 0x39, 0x3f,
+	0x4c, 0xfe, 0x3f, 0x60, 0x0d, 0xff, 0x7f, 0x00, 0x5d, 0x85, 0x0c, 0xf5, 0x3a, 0xa7, 0xf8, 0x47,
+	0x62, 0xca, 0xc4, 0x26, 0xb3, 0xe1, 0x75, 0xb0, 0xd4, 0x91, 0x23, 0x6b, 0x2a, 0x09, 0x55, 0xee,
+	0x15, 0xe2, 0x91, 0x35, 0x95, 0xb1, 0x38, 0x2d, 0xab, 0xa6, 0x03, 0xee, 0xb4, 0x65, 0x92, 0x67,
+	0x63, 0xf7, 0xee, 0x69, 0x12, 0x0e, 0x79, 0xe5, 0xdf, 0x8f, 0xc3, 0x4c, 0x9d, 0xed, 0x53, 0xbb,
+	0x6b, 0x3b, 0x54, 0xad, 0xeb, 0x01, 0xcc, 0xec, 0x13, 0xe6, 0xb4, 0x7d, 0xaa, 0xb7, 0xd0, 0x6c,
+	0xdd, 0xbb, 0xa1, 0xd5, 0x9b, 0x49, 0xe6, 0xf3, 0xa3, 0xd2, 0x62, 0x4a, 0x3d, 0xc5, 0xc5, 0x69,
+	0x24, 0xf4, 0x08, 0x80, 0x46, 0x41, 0x34, 0x3b, 0xf9, 0xce, 0xc9, 0x3b, 0x99, 0x0e, 0xbc, 0x9e,
+	0x9d, 0x62, 0x1a, 0x4e, 0x60, 0xa2, 0x1f, 0xca, 0xc1, 0xac, 0xa9, 0xb6, 0x34, 0x50, 0x7f, 0xdb,
+	0x4c, 0xad, 0x56, 0x4e, 0x36, 0xb0, 0x6b, 0x54, 0x14, 0x7c, 0xd4, 0x42, 0x42, 0xaa, 0x1a, 0xe6,
+	0xcc, 0xcf, 0xf2, 0x5f, 0xc7, 0x61, 0xf9, 0xa4, 0xe3, 0x56, 0xf6, 0x19, 0x39, 0x2c, 0xf2, 0xb6,
+	0x08, 0x9b, 0xb0, 0xbe, 0xc5, 0xaa, 0x3e, 0xb3, 0x9b, 0xe2, 0xe0, 0x1e, 0x49, 0x74, 0x0b, 0x32,
+	0x2d, 0x9f, 0x9a, 0xe0, 0x54, 0x4f, 0xf6, 0x3d, 0x15, 0xfd, 0xda, 0xa4, 0x4c, 0xa0, 0x6d, 0x9f,
+	0x62, 0x09, 0x22, 0xb1, 0x5c, 0xd6, 0x30, 0x2d, 0xeb, 0x6c, 0x58, 0x5b, 0xac, 0x81, 0x25, 0x08,
+	0xda, 0x82, 0x6c, 0x8b, 0x07, 0xc2, 0x4c, 0x05, 0x23, 0x83, 0xe5, 0x65, 0xd5, 0x6f, 0xf3, 0x40,
+	0x60, 0x05, 0x53, 0xfe, 0x5b, 0x16, 0x4a, 0x27, 0xcc, 0x0d, 0x68, 0x13, 0x16, 0xf4, 0x25, 0x79,
+	0x9b, 0xfa, 0x8c, 0x37, 0xd2, 0xb1, 0xbc, 0xa4, 0x2e, 0xb1, 0xfd, 0x6c, 0x3c, 0x48, 0x07, 0x7d,
+	0x00, 0x73, 0xcc, 0x13, 0xd4, 0xef, 0x10, 0x27, 0x84, 0xd1, 0xcf, 0x02, 0x0b, 0xfa, 0x75, 0x2e,
+	0xc5, 0xc2, 0xbd, 0xb2, 0x03, 0x36, 0x34, 0x73, 0xea, 0x0d, 0x75, 0x60, 0xd6, 0x25, 0x4f, 0x12,
+	0xd7, 0x6d, 0x13, 0xc2, 0xe1, 0xff, 0x86, 0xb4, 0x05, 0x73, 0x2a, 0xfa, 0x0f, 0xd3, 0xca, 0xa6,
+	0x27, 0xee, 0xf8, 0x3b, 0xc2, 0x67, 0x5e, 0x53, 0x5b, 0xdb, 0x4a, 0x61, 0xe1, 0x1e, 0x6c, 0xf4,
+	0x10, 0xf2, 0x2e, 0x79, 0xb2, 0xd3, 0xf6, 0x9b, 0xe1, 0x2d, 0x69, 0x74, 0x3b, 0xea, 0xcd, 0x67,
+	0xcb, 0xa0, 0xe0, 0x08, 0x2f, 0x4c, 0xcd, 0xc9, 0x57, 0x91, 0x9a, 0x61, 0x3a, 0xe5, 0x5f, 0x4d,
+	0x3a, 0x7d, 0x66, 0xc1, 0x74, 0xb2, 0x8a, 0xfb, 0x7b, 0xa7, 0x35, 0x42, 0xef, 0xfc, 0x36, 0x8c,
+	0x0b, 0x6e, 0x4a, 0xf0, 0x54, 0x27, 0x3d, 0x18, 0xd8, 0xf1, 0x5d, 0x8e, 0xc7, 0x05, 0xaf, 0xad,
+	0x3c, 0x7d, 0xb6, 0x34, 0xf6, 0xf9, 0xb3, 0xa5, 0xb1, 0x2f, 0x9e, 0x2d, 0x8d, 0x7d, 0x7a, 0xbc,
+	0x64, 0x3d, 0x3d, 0x5e, 0xb2, 0x3e, 0x3f, 0x5e, 0xb2, 0xbe, 0x38, 0x5e, 0xb2, 0xbe, 0x3c, 0x5e,
+	0xb2, 0x7e, 0xf9, 0xdf, 0xa5, 0xb1, 0x87, 0xe3, 0x9d, 0xcb, 0xff, 0x0f, 0x00, 0x00, 0xff, 0xff,
+	0x19, 0x34, 0x0f, 0xd6, 0x4b, 0x20, 0x00, 0x00,
+}
+
+func (m *CustomDeploymentStrategyParams) Marshal() (dAtA []byte, err error) {
+	size := m.Size()
+	dAtA = make([]byte, size)
+	n, err := m.MarshalToSizedBuffer(dAtA[:size])
+	if err != nil {
+		return nil, err
+	}
+	return dAtA[:n], nil
+}
+
+func (m *CustomDeploymentStrategyParams) MarshalTo(dAtA []byte) (int, error) {
+	size := m.Size()
+	return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *CustomDeploymentStrategyParams) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+	i := len(dAtA)
+	_ = i
+	var l int
+	_ = l
+	if len(m.Command) > 0 {
+		for iNdEx := len(m.Command) - 1; iNdEx >= 0; iNdEx-- {
+			i -= len(m.Command[iNdEx])
+			copy(dAtA[i:], m.Command[iNdEx])
+			i = encodeVarintGenerated(dAtA, i, uint64(len(m.Command[iNdEx])))
+			i--
+			dAtA[i] = 0x1a
+		}
+	}
+	if len(m.Environment) > 0 {
+		for iNdEx := len(m.Environment) - 1; iNdEx >= 0; iNdEx-- {
+			{
+				size, err := m.Environment[iNdEx].MarshalToSizedBuffer(dAtA[:i])
+				if err != nil {
+					return 0, err
+				}
+				i -= size
+				i = encodeVarintGenerated(dAtA, i, uint64(size))
+			}
+			i--
+			dAtA[i] = 0x12
+		}
+	}
+	i -= len(m.Image)
+	copy(dAtA[i:], m.Image)
+	i = encodeVarintGenerated(dAtA, i, uint64(len(m.Image)))
+	i--
+	dAtA[i] = 0xa
+	return len(dAtA) - i, nil
+}
+
+func (m *DeploymentCause) Marshal() (dAtA []byte, err error) {
+	size := m.Size()
+	dAtA = make([]byte, size)
+	n, err := m.MarshalToSizedBuffer(dAtA[:size])
+	if err != nil {
+		return nil, err
+	}
+	return dAtA[:n], nil
+}
+
+func (m *DeploymentCause) MarshalTo(dAtA []byte) (int, error) {
+	size := m.Size()
+	return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *DeploymentCause) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+	i := len(dAtA)
+	_ = i
+	var l int
+	_ = l
+	if m.ImageTrigger != nil {
+		{
+			size, err := m.ImageTrigger.MarshalToSizedBuffer(dAtA[:i])
+			if err != nil {
+				return 0, err
+			}
+			i -= size
+			i = encodeVarintGenerated(dAtA, i, uint64(size))
+		}
+		i--
+		dAtA[i] = 0x12
+	}
+	i -= len(m.Type)
+	copy(dAtA[i:], m.Type)
+	i = encodeVarintGenerated(dAtA, i, uint64(len(m.Type)))
+	i--
+	dAtA[i] = 0xa
+	return len(dAtA) - i, nil
+}
+
+func (m *DeploymentCauseImageTrigger) Marshal() (dAtA []byte, err error) {
+	size := m.Size()
+	dAtA = make([]byte, size)
+	n, err := m.MarshalToSizedBuffer(dAtA[:size])
+	if err != nil {
+		return nil, err
+	}
+	return dAtA[:n], nil
+}
+
+func (m *DeploymentCauseImageTrigger) MarshalTo(dAtA []byte) (int, error) {
+	size := m.Size()
+	return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *DeploymentCauseImageTrigger) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+	i := len(dAtA)
+	_ = i
+	var l int
+	_ = l
+	{
+		size, err := m.From.MarshalToSizedBuffer(dAtA[:i])
+		if err != nil {
+			return 0, err
+		}
+		i -= size
+		i = encodeVarintGenerated(dAtA, i, uint64(size))
+	}
+	i--
+	dAtA[i] = 0xa
+	return len(dAtA) - i, nil
+}
+
+func (m *DeploymentCondition) Marshal() (dAtA []byte, err error) {
+	size := m.Size()
+	dAtA = make([]byte, size)
+	n, err := m.MarshalToSizedBuffer(dAtA[:size])
+	if err != nil {
+		return nil, err
+	}
+	return dAtA[:n], nil
+}
+
+func (m *DeploymentCondition) MarshalTo(dAtA []byte) (int, error) {
+	size := m.Size()
+	return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *DeploymentCondition) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+	i := len(dAtA)
+	_ = i
+	var l int
+	_ = l
+	{
+		size, err := m.LastUpdateTime.MarshalToSizedBuffer(dAtA[:i])
+		if err != nil {
+			return 0, err
+		}
+		i -= size
+		i = encodeVarintGenerated(dAtA, i, uint64(size))
+	}
+	i--
+	dAtA[i] = 0x32
+	i -= len(m.Message)
+	copy(dAtA[i:], m.Message)
+	i = encodeVarintGenerated(dAtA, i, uint64(len(m.Message)))
+	i--
+	dAtA[i] = 0x2a
+	i -= len(m.Reason)
+	copy(dAtA[i:], m.Reason)
+	i = encodeVarintGenerated(dAtA, i, uint64(len(m.Reason)))
+	i--
+	dAtA[i] = 0x22
+	{
+		size, err := m.LastTransitionTime.MarshalToSizedBuffer(dAtA[:i])
+		if err != nil {
+			return 0, err
+		}
+		i -= size
+		i = encodeVarintGenerated(dAtA, i, uint64(size))
+	}
+	i--
+	dAtA[i] = 0x1a
+	i -= len(m.Status)
+	copy(dAtA[i:], m.Status)
+	i = encodeVarintGenerated(dAtA, i, uint64(len(m.Status)))
+	i--
+	dAtA[i] = 0x12
+	i -= len(m.Type)
+	copy(dAtA[i:], m.Type)
+	i = encodeVarintGenerated(dAtA, i, uint64(len(m.Type)))
+	i--
+	dAtA[i] = 0xa
+	return len(dAtA) - i, nil
+}
+
+func (m *DeploymentConfig) Marshal() (dAtA []byte, err error) {
+	size := m.Size()
+	dAtA = make([]byte, size)
+	n, err := m.MarshalToSizedBuffer(dAtA[:size])
+	if err != nil {
+		return nil, err
+	}
+	return dAtA[:n], nil
+}
+
+func (m *DeploymentConfig) MarshalTo(dAtA []byte) (int, error) {
+	size := m.Size()
+	return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *DeploymentConfig) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+	i := len(dAtA)
+	_ = i
+	var l int
+	_ = l
+	{
+		size, err := m.Status.MarshalToSizedBuffer(dAtA[:i])
+		if err != nil {
+			return 0, err
+		}
+		i -= size
+		i = encodeVarintGenerated(dAtA, i, uint64(size))
+	}
+	i--
+	dAtA[i] = 0x1a
+	{
+		size, err := m.Spec.MarshalToSizedBuffer(dAtA[:i])
+		if err != nil {
+			return 0, err
+		}
+		i -= size
+		i = encodeVarintGenerated(dAtA, i, uint64(size))
+	}
+	i--
+	dAtA[i] = 0x12
+	{
+		size, err := m.ObjectMeta.MarshalToSizedBuffer(dAtA[:i])
+		if err != nil {
+			return 0, err
+		}
+		i -= size
+		i = encodeVarintGenerated(dAtA, i, uint64(size))
+	}
+	i--
+	dAtA[i] = 0xa
+	return len(dAtA) - i, nil
+}
+
+func (m *DeploymentConfigList) Marshal() (dAtA []byte, err error) {
+	size := m.Size()
+	dAtA = make([]byte, size)
+	n, err := m.MarshalToSizedBuffer(dAtA[:size])
+	if err != nil {
+		return nil, err
+	}
+	return dAtA[:n], nil
+}
+
+func (m *DeploymentConfigList) MarshalTo(dAtA []byte) (int, error) {
+	size := m.Size()
+	return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *DeploymentConfigList) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+	i := len(dAtA)
+	_ = i
+	var l int
+	_ = l
+	if len(m.Items) > 0 {
+		for iNdEx := len(m.Items) - 1; iNdEx >= 0; iNdEx-- {
+			{
+				size, err := m.Items[iNdEx].MarshalToSizedBuffer(dAtA[:i])
+				if err != nil {
+					return 0, err
+				}
+				i -= size
+				i = encodeVarintGenerated(dAtA, i, uint64(size))
+			}
+			i--
+			dAtA[i] = 0x12
+		}
+	}
+	{
+		size, err := m.ListMeta.MarshalToSizedBuffer(dAtA[:i])
+		if err != nil {
+			return 0, err
+		}
+		i -= size
+		i = encodeVarintGenerated(dAtA, i, uint64(size))
+	}
+	i--
+	dAtA[i] = 0xa
+	return len(dAtA) - i, nil
+}
+
+func (m *DeploymentConfigRollback) Marshal() (dAtA []byte, err error) {
+	size := m.Size()
+	dAtA = make([]byte, size)
+	n, err := m.MarshalToSizedBuffer(dAtA[:size])
+	if err != nil {
+		return nil, err
+	}
+	return dAtA[:n], nil
+}
+
+func (m *DeploymentConfigRollback) MarshalTo(dAtA []byte) (int, error) {
+	size := m.Size()
+	return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *DeploymentConfigRollback) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+	i := len(dAtA)
+	_ = i
+	var l int
+	_ = l
+	{
+		size, err := m.Spec.MarshalToSizedBuffer(dAtA[:i])
+		if err != nil {
+			return 0, err
+		}
+		i -= size
+		i = encodeVarintGenerated(dAtA, i, uint64(size))
+	}
+	i--
+	dAtA[i] = 0x1a
+	if len(m.UpdatedAnnotations) > 0 {
+		keysForUpdatedAnnotations := make([]string, 0, len(m.UpdatedAnnotations))
+		for k := range m.UpdatedAnnotations {
+			keysForUpdatedAnnotations = append(keysForUpdatedAnnotations, string(k))
+		}
+		github_com_gogo_protobuf_sortkeys.Strings(keysForUpdatedAnnotations)
+		for iNdEx := len(keysForUpdatedAnnotations) - 1; iNdEx >= 0; iNdEx-- {
+			v := m.UpdatedAnnotations[string(keysForUpdatedAnnotations[iNdEx])]
+			baseI := i
+			i -= len(v)
+			copy(dAtA[i:], v)
+			i = encodeVarintGenerated(dAtA, i, uint64(len(v)))
+			i--
+			dAtA[i] = 0x12
+			i -= len(keysForUpdatedAnnotations[iNdEx])
+			copy(dAtA[i:], keysForUpdatedAnnotations[iNdEx])
+			i = encodeVarintGenerated(dAtA, i, uint64(len(keysForUpdatedAnnotations[iNdEx])))
+			i--
+			dAtA[i] = 0xa
+			i = encodeVarintGenerated(dAtA, i, uint64(baseI-i))
+			i--
+			dAtA[i] = 0x12
+		}
+	}
+	i -= len(m.Name)
+	copy(dAtA[i:], m.Name)
+	i = encodeVarintGenerated(dAtA, i, uint64(len(m.Name)))
+	i--
+	dAtA[i] = 0xa
+	return len(dAtA) - i, nil
+}
+
+func (m *DeploymentConfigRollbackSpec) Marshal() (dAtA []byte, err error) {
+	size := m.Size()
+	dAtA = make([]byte, size)
+	n, err := m.MarshalToSizedBuffer(dAtA[:size])
+	if err != nil {
+		return nil, err
+	}
+	return dAtA[:n], nil
+}
+
+func (m *DeploymentConfigRollbackSpec) MarshalTo(dAtA []byte) (int, error) {
+	size := m.Size()
+	return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *DeploymentConfigRollbackSpec) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+	i := len(dAtA)
+	_ = i
+	var l int
+	_ = l
+	i--
+	if m.IncludeStrategy {
+		dAtA[i] = 1
+	} else {
+		dAtA[i] = 0
+	}
+	i--
+	dAtA[i] = 0x30
+	i--
+	if m.IncludeReplicationMeta {
+		dAtA[i] = 1
+	} else {
+		dAtA[i] = 0
+	}
+	i--
+	dAtA[i] = 0x28
+	i--
+	if m.IncludeTemplate {
+		dAtA[i] = 1
+	} else {
+		dAtA[i] = 0
+	}
+	i--
+	dAtA[i] = 0x20
+	i--
+	if m.IncludeTriggers {
+		dAtA[i] = 1
+	} else {
+		dAtA[i] = 0
+	}
+	i--
+	dAtA[i] = 0x18
+	i = encodeVarintGenerated(dAtA, i, uint64(m.Revision))
+	i--
+	dAtA[i] = 0x10
+	{
+		size, err := m.From.MarshalToSizedBuffer(dAtA[:i])
+		if err != nil {
+			return 0, err
+		}
+		i -= size
+		i = encodeVarintGenerated(dAtA, i, uint64(size))
+	}
+	i--
+	dAtA[i] = 0xa
+	return len(dAtA) - i, nil
+}
+
+func (m *DeploymentConfigSpec) Marshal() (dAtA []byte, err error) {
+	size := m.Size()
+	dAtA = make([]byte, size)
+	n, err := m.MarshalToSizedBuffer(dAtA[:size])
+	if err != nil {
+		return nil, err
+	}
+	return dAtA[:n], nil
+}
+
+func (m *DeploymentConfigSpec) MarshalTo(dAtA []byte) (int, error) {
+	size := m.Size()
+	return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *DeploymentConfigSpec) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+	i := len(dAtA)
+	_ = i
+	var l int
+	_ = l
+	i = encodeVarintGenerated(dAtA, i, uint64(m.MinReadySeconds))
+	i--
+	dAtA[i] = 0x48
+	if m.Template != nil {
+		{
+			size, err := m.Template.MarshalToSizedBuffer(dAtA[:i])
+			if err != nil {
+				return 0, err
+			}
+			i -= size
+			i = encodeVarintGenerated(dAtA, i, uint64(size))
+		}
+		i--
+		dAtA[i] = 0x42
+	}
+	if len(m.Selector) > 0 {
+		keysForSelector := make([]string, 0, len(m.Selector))
+		for k := range m.Selector {
+			keysForSelector = append(keysForSelector, string(k))
+		}
+		github_com_gogo_protobuf_sortkeys.Strings(keysForSelector)
+		for iNdEx := len(keysForSelector) - 1; iNdEx >= 0; iNdEx-- {
+			v := m.Selector[string(keysForSelector[iNdEx])]
+			baseI := i
+			i -= len(v)
+			copy(dAtA[i:], v)
+			i = encodeVarintGenerated(dAtA, i, uint64(len(v)))
+			i--
+			dAtA[i] = 0x12
+			i -= len(keysForSelector[iNdEx])
+			copy(dAtA[i:], keysForSelector[iNdEx])
+			i = encodeVarintGenerated(dAtA, i, uint64(len(keysForSelector[iNdEx])))
+			i--
+			dAtA[i] = 0xa
+			i = encodeVarintGenerated(dAtA, i, uint64(baseI-i))
+			i--
+			dAtA[i] = 0x3a
+		}
+	}
+	i--
+	if m.Paused {
+		dAtA[i] = 1
+	} else {
+		dAtA[i] = 0
+	}
+	i--
+	dAtA[i] = 0x30
+	i--
+	if m.Test {
+		dAtA[i] = 1
+	} else {
+		dAtA[i] = 0
+	}
+	i--
+	dAtA[i] = 0x28
+	if m.RevisionHistoryLimit != nil {
+		i = encodeVarintGenerated(dAtA, i, uint64(*m.RevisionHistoryLimit))
+		i--
+		dAtA[i] = 0x20
+	}
+	i = encodeVarintGenerated(dAtA, i, uint64(m.Replicas))
+	i--
+	dAtA[i] = 0x18
+	if m.Triggers != nil {
+		{
+			size, err := m.Triggers.MarshalToSizedBuffer(dAtA[:i])
+			if err != nil {
+				return 0, err
+			}
+			i -= size
+			i = encodeVarintGenerated(dAtA, i, uint64(size))
+		}
+		i--
+		dAtA[i] = 0x12
+	}
+	{
+		size, err := m.Strategy.MarshalToSizedBuffer(dAtA[:i])
+		if err != nil {
+			return 0, err
+		}
+		i -= size
+		i = encodeVarintGenerated(dAtA, i, uint64(size))
+	}
+	i--
+	dAtA[i] = 0xa
+	return len(dAtA) - i, nil
+}
+
+func (m *DeploymentConfigStatus) Marshal() (dAtA []byte, err error) {
+	size := m.Size()
+	dAtA = make([]byte, size)
+	n, err := m.MarshalToSizedBuffer(dAtA[:size])
+	if err != nil {
+		return nil, err
+	}
+	return dAtA[:n], nil
+}
+
+func (m *DeploymentConfigStatus) MarshalTo(dAtA []byte) (int, error) {
+	size := m.Size()
+	return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *DeploymentConfigStatus) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+	i := len(dAtA)
+	_ = i
+	var l int
+	_ = l
+	i = encodeVarintGenerated(dAtA, i, uint64(m.ReadyReplicas))
+	i--
+	dAtA[i] = 0x48
+	if len(m.Conditions) > 0 {
+		for iNdEx := len(m.Conditions) - 1; iNdEx >= 0; iNdEx-- {
+			{
+				size, err := m.Conditions[iNdEx].MarshalToSizedBuffer(dAtA[:i])
+				if err != nil {
+					return 0, err
+				}
+				i -= size
+				i = encodeVarintGenerated(dAtA, i, uint64(size))
+			}
+			i--
+			dAtA[i] = 0x42
+		}
+	}
+	if m.Details != nil {
+		{
+			size, err := m.Details.MarshalToSizedBuffer(dAtA[:i])
+			if err != nil {
+				return 0, err
+			}
+			i -= size
+			i = encodeVarintGenerated(dAtA, i, uint64(size))
+		}
+		i--
+		dAtA[i] = 0x3a
+	}
+	i = encodeVarintGenerated(dAtA, i, uint64(m.UnavailableReplicas))
+	i--
+	dAtA[i] = 0x30
+	i = encodeVarintGenerated(dAtA, i, uint64(m.AvailableReplicas))
+	i--
+	dAtA[i] = 0x28
+	i = encodeVarintGenerated(dAtA, i, uint64(m.UpdatedReplicas))
+	i--
+	dAtA[i] = 0x20
+	i = encodeVarintGenerated(dAtA, i, uint64(m.Replicas))
+	i--
+	dAtA[i] = 0x18
+	i = encodeVarintGenerated(dAtA, i, uint64(m.ObservedGeneration))
+	i--
+	dAtA[i] = 0x10
+	i = encodeVarintGenerated(dAtA, i, uint64(m.LatestVersion))
+	i--
+	dAtA[i] = 0x8
+	return len(dAtA) - i, nil
+}
+
+func (m *DeploymentDetails) Marshal() (dAtA []byte, err error) {
+	size := m.Size()
+	dAtA = make([]byte, size)
+	n, err := m.MarshalToSizedBuffer(dAtA[:size])
+	if err != nil {
+		return nil, err
+	}
+	return dAtA[:n], nil
+}
+
+func (m *DeploymentDetails) MarshalTo(dAtA []byte) (int, error) {
+	size := m.Size()
+	return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *DeploymentDetails) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+	i := len(dAtA)
+	_ = i
+	var l int
+	_ = l
+	if len(m.Causes) > 0 {
+		for iNdEx := len(m.Causes) - 1; iNdEx >= 0; iNdEx-- {
+			{
+				size, err := m.Causes[iNdEx].MarshalToSizedBuffer(dAtA[:i])
+				if err != nil {
+					return 0, err
+				}
+				i -= size
+				i = encodeVarintGenerated(dAtA, i, uint64(size))
+			}
+			i--
+			dAtA[i] = 0x12
+		}
+	}
+	i -= len(m.Message)
+	copy(dAtA[i:], m.Message)
+	i = encodeVarintGenerated(dAtA, i, uint64(len(m.Message)))
+	i--
+	dAtA[i] = 0xa
+	return len(dAtA) - i, nil
+}
+
+func (m *DeploymentLog) Marshal() (dAtA []byte, err error) {
+	size := m.Size()
+	dAtA = make([]byte, size)
+	n, err := m.MarshalToSizedBuffer(dAtA[:size])
+	if err != nil {
+		return nil, err
+	}
+	return dAtA[:n], nil
+}
+
+func (m *DeploymentLog) MarshalTo(dAtA []byte) (int, error) {
+	size := m.Size()
+	return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *DeploymentLog) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+	i := len(dAtA)
+	_ = i
+	var l int
+	_ = l
+	return len(dAtA) - i, nil
+}
+
+func (m *DeploymentLogOptions) Marshal() (dAtA []byte, err error) {
+	size := m.Size()
+	dAtA = make([]byte, size)
+	n, err := m.MarshalToSizedBuffer(dAtA[:size])
+	if err != nil {
+		return nil, err
+	}
+	return dAtA[:n], nil
+}
+
+func (m *DeploymentLogOptions) MarshalTo(dAtA []byte) (int, error) {
+	size := m.Size()
+	return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *DeploymentLogOptions) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+	i := len(dAtA)
+	_ = i
+	var l int
+	_ = l
+	if m.Version != nil {
+		i = encodeVarintGenerated(dAtA, i, uint64(*m.Version))
+		i--
+		dAtA[i] = 0x50
+	}
+	i--
+	if m.NoWait {
+		dAtA[i] = 1
+	} else {
+		dAtA[i] = 0
+	}
+	i--
+	dAtA[i] = 0x48
+	if m.LimitBytes != nil {
+		i = encodeVarintGenerated(dAtA, i, uint64(*m.LimitBytes))
+		i--
+		dAtA[i] = 0x40
+	}
+	if m.TailLines != nil {
+		i = encodeVarintGenerated(dAtA, i, uint64(*m.TailLines))
+		i--
+		dAtA[i] = 0x38
+	}
+	i--
+	if m.Timestamps {
+		dAtA[i] = 1
+	} else {
+		dAtA[i] = 0
+	}
+	i--
+	dAtA[i] = 0x30
+	if m.SinceTime != nil {
+		{
+			size, err := m.SinceTime.MarshalToSizedBuffer(dAtA[:i])
+			if err != nil {
+				return 0, err
+			}
+			i -= size
+			i = encodeVarintGenerated(dAtA, i, uint64(size))
+		}
+		i--
+		dAtA[i] = 0x2a
+	}
+	if m.SinceSeconds != nil {
+		i = encodeVarintGenerated(dAtA, i, uint64(*m.SinceSeconds))
+		i--
+		dAtA[i] = 0x20
+	}
+	i--
+	if m.Previous {
+		dAtA[i] = 1
+	} else {
+		dAtA[i] = 0
+	}
+	i--
+	dAtA[i] = 0x18
+	i--
+	if m.Follow {
+		dAtA[i] = 1
+	} else {
+		dAtA[i] = 0
+	}
+	i--
+	dAtA[i] = 0x10
+	i -= len(m.Container)
+	copy(dAtA[i:], m.Container)
+	i = encodeVarintGenerated(dAtA, i, uint64(len(m.Container)))
+	i--
+	dAtA[i] = 0xa
+	return len(dAtA) - i, nil
+}
+
+func (m *DeploymentRequest) Marshal() (dAtA []byte, err error) {
+	size := m.Size()
+	dAtA = make([]byte, size)
+	n, err := m.MarshalToSizedBuffer(dAtA[:size])
+	if err != nil {
+		return nil, err
+	}
+	return dAtA[:n], nil
+}
+
+func (m *DeploymentRequest) MarshalTo(dAtA []byte) (int, error) {
+	size := m.Size()
+	return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *DeploymentRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+	i := len(dAtA)
+	_ = i
+	var l int
+	_ = l
+	if len(m.ExcludeTriggers) > 0 {
+		for iNdEx := len(m.ExcludeTriggers) - 1; iNdEx >= 0; iNdEx-- {
+			i -= len(m.ExcludeTriggers[iNdEx])
+			copy(dAtA[i:], m.ExcludeTriggers[iNdEx])
+			i = encodeVarintGenerated(dAtA, i, uint64(len(m.ExcludeTriggers[iNdEx])))
+			i--
+			dAtA[i] = 0x22
+		}
+	}
+	i--
+	if m.Force {
+		dAtA[i] = 1
+	} else {
+		dAtA[i] = 0
+	}
+	i--
+	dAtA[i] = 0x18
+	i--
+	if m.Latest {
+		dAtA[i] = 1
+	} else {
+		dAtA[i] = 0
+	}
+	i--
+	dAtA[i] = 0x10
+	i -= len(m.Name)
+	copy(dAtA[i:], m.Name)
+	i = encodeVarintGenerated(dAtA, i, uint64(len(m.Name)))
+	i--
+	dAtA[i] = 0xa
+	return len(dAtA) - i, nil
+}
+
+func (m *DeploymentStrategy) Marshal() (dAtA []byte, err error) {
+	size := m.Size()
+	dAtA = make([]byte, size)
+	n, err := m.MarshalToSizedBuffer(dAtA[:size])
+	if err != nil {
+		return nil, err
+	}
+	return dAtA[:n], nil
+}
+
+func (m *DeploymentStrategy) MarshalTo(dAtA []byte) (int, error) {
+	size := m.Size()
+	return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *DeploymentStrategy) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+	i := len(dAtA)
+	_ = i
+	var l int
+	_ = l
+	if m.ActiveDeadlineSeconds != nil {
+		i = encodeVarintGenerated(dAtA, i, uint64(*m.ActiveDeadlineSeconds))
+		i--
+		dAtA[i] = 0x40
+	}
+	if len(m.Annotations) > 0 {
+		keysForAnnotations := make([]string, 0, len(m.Annotations))
+		for k := range m.Annotations {
+			keysForAnnotations = append(keysForAnnotations, string(k))
+		}
+		github_com_gogo_protobuf_sortkeys.Strings(keysForAnnotations)
+		for iNdEx := len(keysForAnnotations) - 1; iNdEx >= 0; iNdEx-- {
+			v := m.Annotations[string(keysForAnnotations[iNdEx])]
+			baseI := i
+			i -= len(v)
+			copy(dAtA[i:], v)
+			i = encodeVarintGenerated(dAtA, i, uint64(len(v)))
+			i--
+			dAtA[i] = 0x12
+			i -= len(keysForAnnotations[iNdEx])
+			copy(dAtA[i:], keysForAnnotations[iNdEx])
+			i = encodeVarintGenerated(dAtA, i, uint64(len(keysForAnnotations[iNdEx])))
+			i--
+			dAtA[i] = 0xa
+			i = encodeVarintGenerated(dAtA, i, uint64(baseI-i))
+			i--
+			dAtA[i] = 0x3a
+		}
+	}
+	if len(m.Labels) > 0 {
+		keysForLabels := make([]string, 0, len(m.Labels))
+		for k := range m.Labels {
+			keysForLabels = append(keysForLabels, string(k))
+		}
+		github_com_gogo_protobuf_sortkeys.Strings(keysForLabels)
+		for iNdEx := len(keysForLabels) - 1; iNdEx >= 0; iNdEx-- {
+			v := m.Labels[string(keysForLabels[iNdEx])]
+			baseI := i
+			i -= len(v)
+			copy(dAtA[i:], v)
+			i = encodeVarintGenerated(dAtA, i, uint64(len(v)))
+			i--
+			dAtA[i] = 0x12
+			i -= len(keysForLabels[iNdEx])
+			copy(dAtA[i:], keysForLabels[iNdEx])
+			i = encodeVarintGenerated(dAtA, i, uint64(len(keysForLabels[iNdEx])))
+			i--
+			dAtA[i] = 0xa
+			i = encodeVarintGenerated(dAtA, i, uint64(baseI-i))
+			i--
+			dAtA[i] = 0x32
+		}
+	}
+	{
+		size, err := m.Resources.MarshalToSizedBuffer(dAtA[:i])
+		if err != nil {
+			return 0, err
+		}
+		i -= size
+		i = encodeVarintGenerated(dAtA, i, uint64(size))
+	}
+	i--
+	dAtA[i] = 0x2a
+	if m.RollingParams != nil {
+		{
+			size, err := m.RollingParams.MarshalToSizedBuffer(dAtA[:i])
+			if err != nil {
+				return 0, err
+			}
+			i -= size
+			i = encodeVarintGenerated(dAtA, i, uint64(size))
+		}
+		i--
+		dAtA[i] = 0x22
+	}
+	if m.RecreateParams != nil {
+		{
+			size, err := m.RecreateParams.MarshalToSizedBuffer(dAtA[:i])
+			if err != nil {
+				return 0, err
+			}
+			i -= size
+			i = encodeVarintGenerated(dAtA, i, uint64(size))
+		}
+		i--
+		dAtA[i] = 0x1a
+	}
+	if m.CustomParams != nil {
+		{
+			size, err := m.CustomParams.MarshalToSizedBuffer(dAtA[:i])
+			if err != nil {
+				return 0, err
+			}
+			i -= size
+			i = encodeVarintGenerated(dAtA, i, uint64(size))
+		}
+		i--
+		dAtA[i] = 0x12
+	}
+	i -= len(m.Type)
+	copy(dAtA[i:], m.Type)
+	i = encodeVarintGenerated(dAtA, i, uint64(len(m.Type)))
+	i--
+	dAtA[i] = 0xa
+	return len(dAtA) - i, nil
+}
+
+func (m *DeploymentTriggerImageChangeParams) Marshal() (dAtA []byte, err error) {
+	size := m.Size()
+	dAtA = make([]byte, size)
+	n, err := m.MarshalToSizedBuffer(dAtA[:size])
+	if err != nil {
+		return nil, err
+	}
+	return dAtA[:n], nil
+}
+
+func (m *DeploymentTriggerImageChangeParams) MarshalTo(dAtA []byte) (int, error) {
+	size := m.Size()
+	return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *DeploymentTriggerImageChangeParams) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+	i := len(dAtA)
+	_ = i
+	var l int
+	_ = l
+	i -= len(m.LastTriggeredImage)
+	copy(dAtA[i:], m.LastTriggeredImage)
+	i = encodeVarintGenerated(dAtA, i, uint64(len(m.LastTriggeredImage)))
+	i--
+	dAtA[i] = 0x22
+	{
+		size, err := m.From.MarshalToSizedBuffer(dAtA[:i])
+		if err != nil {
+			return 0, err
+		}
+		i -= size
+		i = encodeVarintGenerated(dAtA, i, uint64(size))
+	}
+	i--
+	dAtA[i] = 0x1a
+	if len(m.ContainerNames) > 0 {
+		for iNdEx := len(m.ContainerNames) - 1; iNdEx >= 0; iNdEx-- {
+			i -= len(m.ContainerNames[iNdEx])
+			copy(dAtA[i:], m.ContainerNames[iNdEx])
+			i = encodeVarintGenerated(dAtA, i, uint64(len(m.ContainerNames[iNdEx])))
+			i--
+			dAtA[i] = 0x12
+		}
+	}
+	i--
+	if m.Automatic {
+		dAtA[i] = 1
+	} else {
+		dAtA[i] = 0
+	}
+	i--
+	dAtA[i] = 0x8
+	return len(dAtA) - i, nil
+}
+
+func (m DeploymentTriggerPolicies) Marshal() (dAtA []byte, err error) {
+	size := m.Size()
+	dAtA = make([]byte, size)
+	n, err := m.MarshalToSizedBuffer(dAtA[:size])
+	if err != nil {
+		return nil, err
+	}
+	return dAtA[:n], nil
+}
+
+func (m DeploymentTriggerPolicies) MarshalTo(dAtA []byte) (int, error) {
+	size := m.Size()
+	return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m DeploymentTriggerPolicies) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+	i := len(dAtA)
+	_ = i
+	var l int
+	_ = l
+	if len(m) > 0 {
+		for iNdEx := len(m) - 1; iNdEx >= 0; iNdEx-- {
+			{
+				size, err := m[iNdEx].MarshalToSizedBuffer(dAtA[:i])
+				if err != nil {
+					return 0, err
+				}
+				i -= size
+				i = encodeVarintGenerated(dAtA, i, uint64(size))
+			}
+			i--
+			dAtA[i] = 0xa
+		}
+	}
+	return len(dAtA) - i, nil
+}
+
+func (m *DeploymentTriggerPolicy) Marshal() (dAtA []byte, err error) {
+	size := m.Size()
+	dAtA = make([]byte, size)
+	n, err := m.MarshalToSizedBuffer(dAtA[:size])
+	if err != nil {
+		return nil, err
+	}
+	return dAtA[:n], nil
+}
+
+func (m *DeploymentTriggerPolicy) MarshalTo(dAtA []byte) (int, error) {
+	size := m.Size()
+	return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *DeploymentTriggerPolicy) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+	i := len(dAtA)
+	_ = i
+	var l int
+	_ = l
+	if m.ImageChangeParams != nil {
+		{
+			size, err := m.ImageChangeParams.MarshalToSizedBuffer(dAtA[:i])
+			if err != nil {
+				return 0, err
+			}
+			i -= size
+			i = encodeVarintGenerated(dAtA, i, uint64(size))
+		}
+		i--
+		dAtA[i] = 0x12
+	}
+	i -= len(m.Type)
+	copy(dAtA[i:], m.Type)
+	i = encodeVarintGenerated(dAtA, i, uint64(len(m.Type)))
+	i--
+	dAtA[i] = 0xa
+	return len(dAtA) - i, nil
+}
+
+func (m *ExecNewPodHook) Marshal() (dAtA []byte, err error) {
+	size := m.Size()
+	dAtA = make([]byte, size)
+	n, err := m.MarshalToSizedBuffer(dAtA[:size])
+	if err != nil {
+		return nil, err
+	}
+	return dAtA[:n], nil
+}
+
+func (m *ExecNewPodHook) MarshalTo(dAtA []byte) (int, error) {
+	size := m.Size()
+	return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *ExecNewPodHook) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+	i := len(dAtA)
+	_ = i
+	var l int
+	_ = l
+	if len(m.Volumes) > 0 {
+		for iNdEx := len(m.Volumes) - 1; iNdEx >= 0; iNdEx-- {
+			i -= len(m.Volumes[iNdEx])
+			copy(dAtA[i:], m.Volumes[iNdEx])
+			i = encodeVarintGenerated(dAtA, i, uint64(len(m.Volumes[iNdEx])))
+			i--
+			dAtA[i] = 0x22
+		}
+	}
+	i -= len(m.ContainerName)
+	copy(dAtA[i:], m.ContainerName)
+	i = encodeVarintGenerated(dAtA, i, uint64(len(m.ContainerName)))
+	i--
+	dAtA[i] = 0x1a
+	if len(m.Env) > 0 {
+		for iNdEx := len(m.Env) - 1; iNdEx >= 0; iNdEx-- {
+			{
+				size, err := m.Env[iNdEx].MarshalToSizedBuffer(dAtA[:i])
+				if err != nil {
+					return 0, err
+				}
+				i -= size
+				i = encodeVarintGenerated(dAtA, i, uint64(size))
+			}
+			i--
+			dAtA[i] = 0x12
+		}
+	}
+	if len(m.Command) > 0 {
+		for iNdEx := len(m.Command) - 1; iNdEx >= 0; iNdEx-- {
+			i -= len(m.Command[iNdEx])
+			copy(dAtA[i:], m.Command[iNdEx])
+			i = encodeVarintGenerated(dAtA, i, uint64(len(m.Command[iNdEx])))
+			i--
+			dAtA[i] = 0xa
+		}
+	}
+	return len(dAtA) - i, nil
+}
+
+func (m *LifecycleHook) Marshal() (dAtA []byte, err error) {
+	size := m.Size()
+	dAtA = make([]byte, size)
+	n, err := m.MarshalToSizedBuffer(dAtA[:size])
+	if err != nil {
+		return nil, err
+	}
+	return dAtA[:n], nil
+}
+
+func (m *LifecycleHook) MarshalTo(dAtA []byte) (int, error) {
+	size := m.Size()
+	return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *LifecycleHook) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+	i := len(dAtA)
+	_ = i
+	var l int
+	_ = l
+	if len(m.TagImages) > 0 {
+		for iNdEx := len(m.TagImages) - 1; iNdEx >= 0; iNdEx-- {
+			{
+				size, err := m.TagImages[iNdEx].MarshalToSizedBuffer(dAtA[:i])
+				if err != nil {
+					return 0, err
+				}
+				i -= size
+				i = encodeVarintGenerated(dAtA, i, uint64(size))
+			}
+			i--
+			dAtA[i] = 0x1a
+		}
+	}
+	if m.ExecNewPod != nil {
+		{
+			size, err := m.ExecNewPod.MarshalToSizedBuffer(dAtA[:i])
+			if err != nil {
+				return 0, err
+			}
+			i -= size
+			i = encodeVarintGenerated(dAtA, i, uint64(size))
+		}
+		i--
+		dAtA[i] = 0x12
+	}
+	i -= len(m.FailurePolicy)
+	copy(dAtA[i:], m.FailurePolicy)
+	i = encodeVarintGenerated(dAtA, i, uint64(len(m.FailurePolicy)))
+	i--
+	dAtA[i] = 0xa
+	return len(dAtA) - i, nil
+}
+
+func (m *RecreateDeploymentStrategyParams) Marshal() (dAtA []byte, err error) {
+	size := m.Size()
+	dAtA = make([]byte, size)
+	n, err := m.MarshalToSizedBuffer(dAtA[:size])
+	if err != nil {
+		return nil, err
+	}
+	return dAtA[:n], nil
+}
+
+func (m *RecreateDeploymentStrategyParams) MarshalTo(dAtA []byte) (int, error) {
+	size := m.Size()
+	return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *RecreateDeploymentStrategyParams) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+	i := len(dAtA)
+	_ = i
+	var l int
+	_ = l
+	if m.Post != nil {
+		{
+			size, err := m.Post.MarshalToSizedBuffer(dAtA[:i])
+			if err != nil {
+				return 0, err
+			}
+			i -= size
+			i = encodeVarintGenerated(dAtA, i, uint64(size))
+		}
+		i--
+		dAtA[i] = 0x22
+	}
+	if m.Mid != nil {
+		{
+			size, err := m.Mid.MarshalToSizedBuffer(dAtA[:i])
+			if err != nil {
+				return 0, err
+			}
+			i -= size
+			i = encodeVarintGenerated(dAtA, i, uint64(size))
+		}
+		i--
+		dAtA[i] = 0x1a
+	}
+	if m.Pre != nil {
+		{
+			size, err := m.Pre.MarshalToSizedBuffer(dAtA[:i])
+			if err != nil {
+				return 0, err
+			}
+			i -= size
+			i = encodeVarintGenerated(dAtA, i, uint64(size))
+		}
+		i--
+		dAtA[i] = 0x12
+	}
+	if m.TimeoutSeconds != nil {
+		i = encodeVarintGenerated(dAtA, i, uint64(*m.TimeoutSeconds))
+		i--
+		dAtA[i] = 0x8
+	}
+	return len(dAtA) - i, nil
+}
+
+func (m *RollingDeploymentStrategyParams) Marshal() (dAtA []byte, err error) {
+	size := m.Size()
+	dAtA = make([]byte, size)
+	n, err := m.MarshalToSizedBuffer(dAtA[:size])
+	if err != nil {
+		return nil, err
+	}
+	return dAtA[:n], nil
+}
+
+func (m *RollingDeploymentStrategyParams) MarshalTo(dAtA []byte) (int, error) {
+	size := m.Size()
+	return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *RollingDeploymentStrategyParams) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+	i := len(dAtA)
+	_ = i
+	var l int
+	_ = l
+	if m.Post != nil {
+		{
+			size, err := m.Post.MarshalToSizedBuffer(dAtA[:i])
+			if err != nil {
+				return 0, err
+			}
+			i -= size
+			i = encodeVarintGenerated(dAtA, i, uint64(size))
+		}
+		i--
+		dAtA[i] = 0x42
+	}
+	if m.Pre != nil {
+		{
+			size, err := m.Pre.MarshalToSizedBuffer(dAtA[:i])
+			if err != nil {
+				return 0, err
+			}
+			i -= size
+			i = encodeVarintGenerated(dAtA, i, uint64(size))
+		}
+		i--
+		dAtA[i] = 0x3a
+	}
+	if m.MaxSurge != nil {
+		{
+			size, err := m.MaxSurge.MarshalToSizedBuffer(dAtA[:i])
+			if err != nil {
+				return 0, err
+			}
+			i -= size
+			i = encodeVarintGenerated(dAtA, i, uint64(size))
+		}
+		i--
+		dAtA[i] = 0x2a
+	}
+	if m.MaxUnavailable != nil {
+		{
+			size, err := m.MaxUnavailable.MarshalToSizedBuffer(dAtA[:i])
+			if err != nil {
+				return 0, err
+			}
+			i -= size
+			i = encodeVarintGenerated(dAtA, i, uint64(size))
+		}
+		i--
+		dAtA[i] = 0x22
+	}
+	if m.TimeoutSeconds != nil {
+		i = encodeVarintGenerated(dAtA, i, uint64(*m.TimeoutSeconds))
+		i--
+		dAtA[i] = 0x18
+	}
+	if m.IntervalSeconds != nil {
+		i = encodeVarintGenerated(dAtA, i, uint64(*m.IntervalSeconds))
+		i--
+		dAtA[i] = 0x10
+	}
+	if m.UpdatePeriodSeconds != nil {
+		i = encodeVarintGenerated(dAtA, i, uint64(*m.UpdatePeriodSeconds))
+		i--
+		dAtA[i] = 0x8
+	}
+	return len(dAtA) - i, nil
+}
+
+func (m *TagImageHook) Marshal() (dAtA []byte, err error) {
+	size := m.Size()
+	dAtA = make([]byte, size)
+	n, err := m.MarshalToSizedBuffer(dAtA[:size])
+	if err != nil {
+		return nil, err
+	}
+	return dAtA[:n], nil
+}
+
+func (m *TagImageHook) MarshalTo(dAtA []byte) (int, error) {
+	size := m.Size()
+	return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *TagImageHook) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+	i := len(dAtA)
+	_ = i
+	var l int
+	_ = l
+	{
+		size, err := m.To.MarshalToSizedBuffer(dAtA[:i])
+		if err != nil {
+			return 0, err
+		}
+		i -= size
+		i = encodeVarintGenerated(dAtA, i, uint64(size))
+	}
+	i--
+	dAtA[i] = 0x12
+	i -= len(m.ContainerName)
+	copy(dAtA[i:], m.ContainerName)
+	i = encodeVarintGenerated(dAtA, i, uint64(len(m.ContainerName)))
+	i--
+	dAtA[i] = 0xa
+	return len(dAtA) - i, nil
+}
+
+func encodeVarintGenerated(dAtA []byte, offset int, v uint64) int {
+	offset -= sovGenerated(v)
+	base := offset
+	for v >= 1<<7 {
+		dAtA[offset] = uint8(v&0x7f | 0x80)
+		v >>= 7
+		offset++
+	}
+	dAtA[offset] = uint8(v)
+	return base
+}
+func (m *CustomDeploymentStrategyParams) Size() (n int) {
+	if m == nil {
+		return 0
+	}
+	var l int
+	_ = l
+	l = len(m.Image)
+	n += 1 + l + sovGenerated(uint64(l))
+	if len(m.Environment) > 0 {
+		for _, e := range m.Environment {
+			l = e.Size()
+			n += 1 + l + sovGenerated(uint64(l))
+		}
+	}
+	if len(m.Command) > 0 {
+		for _, s := range m.Command {
+			l = len(s)
+			n += 1 + l + sovGenerated(uint64(l))
+		}
+	}
+	return n
+}
+
+func (m *DeploymentCause) Size() (n int) {
+	if m == nil {
+		return 0
+	}
+	var l int
+	_ = l
+	l = len(m.Type)
+	n += 1 + l + sovGenerated(uint64(l))
+	if m.ImageTrigger != nil {
+		l = m.ImageTrigger.Size()
+		n += 1 + l + sovGenerated(uint64(l))
+	}
+	return n
+}
+
+func (m *DeploymentCauseImageTrigger) Size() (n int) {
+	if m == nil {
+		return 0
+	}
+	var l int
+	_ = l
+	l = m.From.Size()
+	n += 1 + l + sovGenerated(uint64(l))
+	return n
+}
+
+func (m *DeploymentCondition) Size() (n int) {
+	if m == nil {
+		return 0
+	}
+	var l int
+	_ = l
+	l = len(m.Type)
+	n += 1 + l + sovGenerated(uint64(l))
+	l = len(m.Status)
+	n += 1 + l + sovGenerated(uint64(l))
+	l = m.LastTransitionTime.Size()
+	n += 1 + l + sovGenerated(uint64(l))
+	l = len(m.Reason)
+	n += 1 + l + sovGenerated(uint64(l))
+	l = len(m.Message)
+	n += 1 + l + sovGenerated(uint64(l))
+	l = m.LastUpdateTime.Size()
+	n += 1 + l + sovGenerated(uint64(l))
+	return n
+}
+
+func (m *DeploymentConfig) Size() (n int) {
+	if m == nil {
+		return 0
+	}
+	var l int
+	_ = l
+	l = m.ObjectMeta.Size()
+	n += 1 + l + sovGenerated(uint64(l))
+	l = m.Spec.Size()
+	n += 1 + l + sovGenerated(uint64(l))
+	l = m.Status.Size()
+	n += 1 + l + sovGenerated(uint64(l))
+	return n
+}
+
+func (m *DeploymentConfigList) Size() (n int) {
+	if m == nil {
+		return 0
+	}
+	var l int
+	_ = l
+	l = m.ListMeta.Size()
+	n += 1 + l + sovGenerated(uint64(l))
+	if len(m.Items) > 0 {
+		for _, e := range m.Items {
+			l = e.Size()
+			n += 1 + l + sovGenerated(uint64(l))
+		}
+	}
+	return n
+}
+
+func (m *DeploymentConfigRollback) Size() (n int) {
+	if m == nil {
+		return 0
+	}
+	var l int
+	_ = l
+	l = len(m.Name)
+	n += 1 + l + sovGenerated(uint64(l))
+	if len(m.UpdatedAnnotations) > 0 {
+		for k, v := range m.UpdatedAnnotations {
+			_ = k
+			_ = v
+			mapEntrySize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + len(v) + sovGenerated(uint64(len(v)))
+			n += mapEntrySize + 1 + sovGenerated(uint64(mapEntrySize))
+		}
+	}
+	l = m.Spec.Size()
+	n += 1 + l + sovGenerated(uint64(l))
+	return n
+}
+
+func (m *DeploymentConfigRollbackSpec) Size() (n int) {
+	if m == nil {
+		return 0
+	}
+	var l int
+	_ = l
+	l = m.From.Size()
+	n += 1 + l + sovGenerated(uint64(l))
+	n += 1 + sovGenerated(uint64(m.Revision))
+	n += 2
+	n += 2
+	n += 2
+	n += 2
+	return n
+}
+
+func (m *DeploymentConfigSpec) Size() (n int) {
+	if m == nil {
+		return 0
+	}
+	var l int
+	_ = l
+	l = m.Strategy.Size()
+	n += 1 + l + sovGenerated(uint64(l))
+	if m.Triggers != nil {
+		l = m.Triggers.Size()
+		n += 1 + l + sovGenerated(uint64(l))
+	}
+	n += 1 + sovGenerated(uint64(m.Replicas))
+	if m.RevisionHistoryLimit != nil {
+		n += 1 + sovGenerated(uint64(*m.RevisionHistoryLimit))
+	}
+	n += 2
+	n += 2
+	if len(m.Selector) > 0 {
+		for k, v := range m.Selector {
+			_ = k
+			_ = v
+			mapEntrySize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + len(v) + sovGenerated(uint64(len(v)))
+			n += mapEntrySize + 1 + sovGenerated(uint64(mapEntrySize))
+		}
+	}
+	if m.Template != nil {
+		l = m.Template.Size()
+		n += 1 + l + sovGenerated(uint64(l))
+	}
+	n += 1 + sovGenerated(uint64(m.MinReadySeconds))
+	return n
+}
+
+func (m *DeploymentConfigStatus) Size() (n int) {
+	if m == nil {
+		return 0
+	}
+	var l int
+	_ = l
+	n += 1 + sovGenerated(uint64(m.LatestVersion))
+	n += 1 + sovGenerated(uint64(m.ObservedGeneration))
+	n += 1 + sovGenerated(uint64(m.Replicas))
+	n += 1 + sovGenerated(uint64(m.UpdatedReplicas))
+	n += 1 + sovGenerated(uint64(m.AvailableReplicas))
+	n += 1 + sovGenerated(uint64(m.UnavailableReplicas))
+	if m.Details != nil {
+		l = m.Details.Size()
+		n += 1 + l + sovGenerated(uint64(l))
+	}
+	if len(m.Conditions) > 0 {
+		for _, e := range m.Conditions {
+			l = e.Size()
+			n += 1 + l + sovGenerated(uint64(l))
+		}
+	}
+	n += 1 + sovGenerated(uint64(m.ReadyReplicas))
+	return n
+}
+
+func (m *DeploymentDetails) Size() (n int) {
+	if m == nil {
+		return 0
+	}
+	var l int
+	_ = l
+	l = len(m.Message)
+	n += 1 + l + sovGenerated(uint64(l))
+	if len(m.Causes) > 0 {
+		for _, e := range m.Causes {
+			l = e.Size()
+			n += 1 + l + sovGenerated(uint64(l))
+		}
+	}
+	return n
+}
+
+func (m *DeploymentLog) Size() (n int) {
+	if m == nil {
+		return 0
+	}
+	var l int
+	_ = l
+	return n
+}
+
+func (m *DeploymentLogOptions) Size() (n int) {
+	if m == nil {
+		return 0
+	}
+	var l int
+	_ = l
+	l = len(m.Container)
+	n += 1 + l + sovGenerated(uint64(l))
+	n += 2
+	n += 2
+	if m.SinceSeconds != nil {
+		n += 1 + sovGenerated(uint64(*m.SinceSeconds))
+	}
+	if m.SinceTime != nil {
+		l = m.SinceTime.Size()
+		n += 1 + l + sovGenerated(uint64(l))
+	}
+	n += 2
+	if m.TailLines != nil {
+		n += 1 + sovGenerated(uint64(*m.TailLines))
+	}
+	if m.LimitBytes != nil {
+		n += 1 + sovGenerated(uint64(*m.LimitBytes))
+	}
+	n += 2
+	if m.Version != nil {
+		n += 1 + sovGenerated(uint64(*m.Version))
+	}
+	return n
+}
+
+func (m *DeploymentRequest) Size() (n int) {
+	if m == nil {
+		return 0
+	}
+	var l int
+	_ = l
+	l = len(m.Name)
+	n += 1 + l + sovGenerated(uint64(l))
+	n += 2
+	n += 2
+	if len(m.ExcludeTriggers) > 0 {
+		for _, s := range m.ExcludeTriggers {
+			l = len(s)
+			n += 1 + l + sovGenerated(uint64(l))
+		}
+	}
+	return n
+}
+
+func (m *DeploymentStrategy) Size() (n int) {
+	if m == nil {
+		return 0
+	}
+	var l int
+	_ = l
+	l = len(m.Type)
+	n += 1 + l + sovGenerated(uint64(l))
+	if m.CustomParams != nil {
+		l = m.CustomParams.Size()
+		n += 1 + l + sovGenerated(uint64(l))
+	}
+	if m.RecreateParams != nil {
+		l = m.RecreateParams.Size()
+		n += 1 + l + sovGenerated(uint64(l))
+	}
+	if m.RollingParams != nil {
+		l = m.RollingParams.Size()
+		n += 1 + l + sovGenerated(uint64(l))
+	}
+	l = m.Resources.Size()
+	n += 1 + l + sovGenerated(uint64(l))
+	if len(m.Labels) > 0 {
+		for k, v := range m.Labels {
+			_ = k
+			_ = v
+			mapEntrySize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + len(v) + sovGenerated(uint64(len(v)))
+			n += mapEntrySize + 1 + sovGenerated(uint64(mapEntrySize))
+		}
+	}
+	if len(m.Annotations) > 0 {
+		for k, v := range m.Annotations {
+			_ = k
+			_ = v
+			mapEntrySize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + len(v) + sovGenerated(uint64(len(v)))
+			n += mapEntrySize + 1 + sovGenerated(uint64(mapEntrySize))
+		}
+	}
+	if m.ActiveDeadlineSeconds != nil {
+		n += 1 + sovGenerated(uint64(*m.ActiveDeadlineSeconds))
+	}
+	return n
+}
+
+func (m *DeploymentTriggerImageChangeParams) Size() (n int) {
+	if m == nil {
+		return 0
+	}
+	var l int
+	_ = l
+	n += 2
+	if len(m.ContainerNames) > 0 {
+		for _, s := range m.ContainerNames {
+			l = len(s)
+			n += 1 + l + sovGenerated(uint64(l))
+		}
+	}
+	l = m.From.Size()
+	n += 1 + l + sovGenerated(uint64(l))
+	l = len(m.LastTriggeredImage)
+	n += 1 + l + sovGenerated(uint64(l))
+	return n
+}
+
+func (m DeploymentTriggerPolicies) Size() (n int) {
+	if m == nil {
+		return 0
+	}
+	var l int
+	_ = l
+	if len(m) > 0 {
+		for _, e := range m {
+			l = e.Size()
+			n += 1 + l + sovGenerated(uint64(l))
+		}
+	}
+	return n
+}
+
+func (m *DeploymentTriggerPolicy) Size() (n int) {
+	if m == nil {
+		return 0
+	}
+	var l int
+	_ = l
+	l = len(m.Type)
+	n
+= 1 + l + sovGenerated(uint64(l)) + if m.ImageChangeParams != nil { + l = m.ImageChangeParams.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + return n +} + +func (m *ExecNewPodHook) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.Command) > 0 { + for _, s := range m.Command { + l = len(s) + n += 1 + l + sovGenerated(uint64(l)) + } + } + if len(m.Env) > 0 { + for _, e := range m.Env { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + l = len(m.ContainerName) + n += 1 + l + sovGenerated(uint64(l)) + if len(m.Volumes) > 0 { + for _, s := range m.Volumes { + l = len(s) + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m *LifecycleHook) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.FailurePolicy) + n += 1 + l + sovGenerated(uint64(l)) + if m.ExecNewPod != nil { + l = m.ExecNewPod.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if len(m.TagImages) > 0 { + for _, e := range m.TagImages { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m *RecreateDeploymentStrategyParams) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.TimeoutSeconds != nil { + n += 1 + sovGenerated(uint64(*m.TimeoutSeconds)) + } + if m.Pre != nil { + l = m.Pre.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if m.Mid != nil { + l = m.Mid.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if m.Post != nil { + l = m.Post.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + return n +} + +func (m *RollingDeploymentStrategyParams) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.UpdatePeriodSeconds != nil { + n += 1 + sovGenerated(uint64(*m.UpdatePeriodSeconds)) + } + if m.IntervalSeconds != nil { + n += 1 + sovGenerated(uint64(*m.IntervalSeconds)) + } + if m.TimeoutSeconds != nil { + n += 1 + sovGenerated(uint64(*m.TimeoutSeconds)) + } + if m.MaxUnavailable != nil { + l = m.MaxUnavailable.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if m.MaxSurge != nil { + l = m.MaxSurge.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if m.Pre != nil { + l = m.Pre.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if m.Post != nil { + l = m.Post.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + return n +} + +func (m *TagImageHook) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.ContainerName) + n += 1 + l + sovGenerated(uint64(l)) + l = m.To.Size() + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func sovGenerated(x uint64) (n int) { + return (math_bits.Len64(x|1) + 6) / 7 +} +func sozGenerated(x uint64) (n int) { + return sovGenerated(uint64((x << 1) ^ uint64((int64(x) >> 63)))) +} +func (this *CustomDeploymentStrategyParams) String() string { + if this == nil { + return "nil" + } + repeatedStringForEnvironment := "[]EnvVar{" + for _, f := range this.Environment { + repeatedStringForEnvironment += fmt.Sprintf("%v", f) + "," + } + repeatedStringForEnvironment += "}" + s := strings.Join([]string{`&CustomDeploymentStrategyParams{`, + `Image:` + fmt.Sprintf("%v", this.Image) + `,`, + `Environment:` + repeatedStringForEnvironment + `,`, + `Command:` + fmt.Sprintf("%v", this.Command) + `,`, + `}`, + }, "") + return s +} +func (this *DeploymentCause) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&DeploymentCause{`, + `Type:` + fmt.Sprintf("%v", this.Type) + `,`, + `ImageTrigger:` + strings.Replace(this.ImageTrigger.String(), "DeploymentCauseImageTrigger", 
"DeploymentCauseImageTrigger", 1) + `,`, + `}`, + }, "") + return s +} +func (this *DeploymentCauseImageTrigger) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&DeploymentCauseImageTrigger{`, + `From:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.From), "ObjectReference", "v1.ObjectReference", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *DeploymentCondition) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&DeploymentCondition{`, + `Type:` + fmt.Sprintf("%v", this.Type) + `,`, + `Status:` + fmt.Sprintf("%v", this.Status) + `,`, + `LastTransitionTime:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.LastTransitionTime), "Time", "v11.Time", 1), `&`, ``, 1) + `,`, + `Reason:` + fmt.Sprintf("%v", this.Reason) + `,`, + `Message:` + fmt.Sprintf("%v", this.Message) + `,`, + `LastUpdateTime:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.LastUpdateTime), "Time", "v11.Time", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *DeploymentConfig) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&DeploymentConfig{`, + `ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v11.ObjectMeta", 1), `&`, ``, 1) + `,`, + `Spec:` + strings.Replace(strings.Replace(this.Spec.String(), "DeploymentConfigSpec", "DeploymentConfigSpec", 1), `&`, ``, 1) + `,`, + `Status:` + strings.Replace(strings.Replace(this.Status.String(), "DeploymentConfigStatus", "DeploymentConfigStatus", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *DeploymentConfigList) String() string { + if this == nil { + return "nil" + } + repeatedStringForItems := "[]DeploymentConfig{" + for _, f := range this.Items { + repeatedStringForItems += strings.Replace(strings.Replace(f.String(), "DeploymentConfig", "DeploymentConfig", 1), `&`, ``, 1) + "," + } + repeatedStringForItems += "}" + s := strings.Join([]string{`&DeploymentConfigList{`, + `ListMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ListMeta), "ListMeta", "v11.ListMeta", 1), `&`, ``, 1) + `,`, + `Items:` + repeatedStringForItems + `,`, + `}`, + }, "") + return s +} +func (this *DeploymentConfigRollback) String() string { + if this == nil { + return "nil" + } + keysForUpdatedAnnotations := make([]string, 0, len(this.UpdatedAnnotations)) + for k := range this.UpdatedAnnotations { + keysForUpdatedAnnotations = append(keysForUpdatedAnnotations, k) + } + github_com_gogo_protobuf_sortkeys.Strings(keysForUpdatedAnnotations) + mapStringForUpdatedAnnotations := "map[string]string{" + for _, k := range keysForUpdatedAnnotations { + mapStringForUpdatedAnnotations += fmt.Sprintf("%v: %v,", k, this.UpdatedAnnotations[k]) + } + mapStringForUpdatedAnnotations += "}" + s := strings.Join([]string{`&DeploymentConfigRollback{`, + `Name:` + fmt.Sprintf("%v", this.Name) + `,`, + `UpdatedAnnotations:` + mapStringForUpdatedAnnotations + `,`, + `Spec:` + strings.Replace(strings.Replace(this.Spec.String(), "DeploymentConfigRollbackSpec", "DeploymentConfigRollbackSpec", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *DeploymentConfigRollbackSpec) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&DeploymentConfigRollbackSpec{`, + `From:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.From), "ObjectReference", "v1.ObjectReference", 1), `&`, ``, 1) + `,`, + `Revision:` + fmt.Sprintf("%v", this.Revision) + 
`,`, + `IncludeTriggers:` + fmt.Sprintf("%v", this.IncludeTriggers) + `,`, + `IncludeTemplate:` + fmt.Sprintf("%v", this.IncludeTemplate) + `,`, + `IncludeReplicationMeta:` + fmt.Sprintf("%v", this.IncludeReplicationMeta) + `,`, + `IncludeStrategy:` + fmt.Sprintf("%v", this.IncludeStrategy) + `,`, + `}`, + }, "") + return s +} +func (this *DeploymentConfigSpec) String() string { + if this == nil { + return "nil" + } + keysForSelector := make([]string, 0, len(this.Selector)) + for k := range this.Selector { + keysForSelector = append(keysForSelector, k) + } + github_com_gogo_protobuf_sortkeys.Strings(keysForSelector) + mapStringForSelector := "map[string]string{" + for _, k := range keysForSelector { + mapStringForSelector += fmt.Sprintf("%v: %v,", k, this.Selector[k]) + } + mapStringForSelector += "}" + s := strings.Join([]string{`&DeploymentConfigSpec{`, + `Strategy:` + strings.Replace(strings.Replace(this.Strategy.String(), "DeploymentStrategy", "DeploymentStrategy", 1), `&`, ``, 1) + `,`, + `Triggers:` + strings.Replace(fmt.Sprintf("%v", this.Triggers), "DeploymentTriggerPolicies", "DeploymentTriggerPolicies", 1) + `,`, + `Replicas:` + fmt.Sprintf("%v", this.Replicas) + `,`, + `RevisionHistoryLimit:` + valueToStringGenerated(this.RevisionHistoryLimit) + `,`, + `Test:` + fmt.Sprintf("%v", this.Test) + `,`, + `Paused:` + fmt.Sprintf("%v", this.Paused) + `,`, + `Selector:` + mapStringForSelector + `,`, + `Template:` + strings.Replace(fmt.Sprintf("%v", this.Template), "PodTemplateSpec", "v1.PodTemplateSpec", 1) + `,`, + `MinReadySeconds:` + fmt.Sprintf("%v", this.MinReadySeconds) + `,`, + `}`, + }, "") + return s +} +func (this *DeploymentConfigStatus) String() string { + if this == nil { + return "nil" + } + repeatedStringForConditions := "[]DeploymentCondition{" + for _, f := range this.Conditions { + repeatedStringForConditions += strings.Replace(strings.Replace(f.String(), "DeploymentCondition", "DeploymentCondition", 1), `&`, ``, 1) + "," + } + repeatedStringForConditions += "}" + s := strings.Join([]string{`&DeploymentConfigStatus{`, + `LatestVersion:` + fmt.Sprintf("%v", this.LatestVersion) + `,`, + `ObservedGeneration:` + fmt.Sprintf("%v", this.ObservedGeneration) + `,`, + `Replicas:` + fmt.Sprintf("%v", this.Replicas) + `,`, + `UpdatedReplicas:` + fmt.Sprintf("%v", this.UpdatedReplicas) + `,`, + `AvailableReplicas:` + fmt.Sprintf("%v", this.AvailableReplicas) + `,`, + `UnavailableReplicas:` + fmt.Sprintf("%v", this.UnavailableReplicas) + `,`, + `Details:` + strings.Replace(this.Details.String(), "DeploymentDetails", "DeploymentDetails", 1) + `,`, + `Conditions:` + repeatedStringForConditions + `,`, + `ReadyReplicas:` + fmt.Sprintf("%v", this.ReadyReplicas) + `,`, + `}`, + }, "") + return s +} +func (this *DeploymentDetails) String() string { + if this == nil { + return "nil" + } + repeatedStringForCauses := "[]DeploymentCause{" + for _, f := range this.Causes { + repeatedStringForCauses += strings.Replace(strings.Replace(f.String(), "DeploymentCause", "DeploymentCause", 1), `&`, ``, 1) + "," + } + repeatedStringForCauses += "}" + s := strings.Join([]string{`&DeploymentDetails{`, + `Message:` + fmt.Sprintf("%v", this.Message) + `,`, + `Causes:` + repeatedStringForCauses + `,`, + `}`, + }, "") + return s +} +func (this *DeploymentLog) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&DeploymentLog{`, + `}`, + }, "") + return s +} +func (this *DeploymentLogOptions) String() string { + if this == nil { + return "nil" + } + s := 
strings.Join([]string{`&DeploymentLogOptions{`, + `Container:` + fmt.Sprintf("%v", this.Container) + `,`, + `Follow:` + fmt.Sprintf("%v", this.Follow) + `,`, + `Previous:` + fmt.Sprintf("%v", this.Previous) + `,`, + `SinceSeconds:` + valueToStringGenerated(this.SinceSeconds) + `,`, + `SinceTime:` + strings.Replace(fmt.Sprintf("%v", this.SinceTime), "Time", "v11.Time", 1) + `,`, + `Timestamps:` + fmt.Sprintf("%v", this.Timestamps) + `,`, + `TailLines:` + valueToStringGenerated(this.TailLines) + `,`, + `LimitBytes:` + valueToStringGenerated(this.LimitBytes) + `,`, + `NoWait:` + fmt.Sprintf("%v", this.NoWait) + `,`, + `Version:` + valueToStringGenerated(this.Version) + `,`, + `}`, + }, "") + return s +} +func (this *DeploymentRequest) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&DeploymentRequest{`, + `Name:` + fmt.Sprintf("%v", this.Name) + `,`, + `Latest:` + fmt.Sprintf("%v", this.Latest) + `,`, + `Force:` + fmt.Sprintf("%v", this.Force) + `,`, + `ExcludeTriggers:` + fmt.Sprintf("%v", this.ExcludeTriggers) + `,`, + `}`, + }, "") + return s +} +func (this *DeploymentStrategy) String() string { + if this == nil { + return "nil" + } + keysForLabels := make([]string, 0, len(this.Labels)) + for k := range this.Labels { + keysForLabels = append(keysForLabels, k) + } + github_com_gogo_protobuf_sortkeys.Strings(keysForLabels) + mapStringForLabels := "map[string]string{" + for _, k := range keysForLabels { + mapStringForLabels += fmt.Sprintf("%v: %v,", k, this.Labels[k]) + } + mapStringForLabels += "}" + keysForAnnotations := make([]string, 0, len(this.Annotations)) + for k := range this.Annotations { + keysForAnnotations = append(keysForAnnotations, k) + } + github_com_gogo_protobuf_sortkeys.Strings(keysForAnnotations) + mapStringForAnnotations := "map[string]string{" + for _, k := range keysForAnnotations { + mapStringForAnnotations += fmt.Sprintf("%v: %v,", k, this.Annotations[k]) + } + mapStringForAnnotations += "}" + s := strings.Join([]string{`&DeploymentStrategy{`, + `Type:` + fmt.Sprintf("%v", this.Type) + `,`, + `CustomParams:` + strings.Replace(this.CustomParams.String(), "CustomDeploymentStrategyParams", "CustomDeploymentStrategyParams", 1) + `,`, + `RecreateParams:` + strings.Replace(this.RecreateParams.String(), "RecreateDeploymentStrategyParams", "RecreateDeploymentStrategyParams", 1) + `,`, + `RollingParams:` + strings.Replace(this.RollingParams.String(), "RollingDeploymentStrategyParams", "RollingDeploymentStrategyParams", 1) + `,`, + `Resources:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Resources), "ResourceRequirements", "v1.ResourceRequirements", 1), `&`, ``, 1) + `,`, + `Labels:` + mapStringForLabels + `,`, + `Annotations:` + mapStringForAnnotations + `,`, + `ActiveDeadlineSeconds:` + valueToStringGenerated(this.ActiveDeadlineSeconds) + `,`, + `}`, + }, "") + return s +} +func (this *DeploymentTriggerImageChangeParams) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&DeploymentTriggerImageChangeParams{`, + `Automatic:` + fmt.Sprintf("%v", this.Automatic) + `,`, + `ContainerNames:` + fmt.Sprintf("%v", this.ContainerNames) + `,`, + `From:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.From), "ObjectReference", "v1.ObjectReference", 1), `&`, ``, 1) + `,`, + `LastTriggeredImage:` + fmt.Sprintf("%v", this.LastTriggeredImage) + `,`, + `}`, + }, "") + return s +} +func (this *DeploymentTriggerPolicy) String() string { + if this == nil { + return "nil" + } + s := 
strings.Join([]string{`&DeploymentTriggerPolicy{`, + `Type:` + fmt.Sprintf("%v", this.Type) + `,`, + `ImageChangeParams:` + strings.Replace(this.ImageChangeParams.String(), "DeploymentTriggerImageChangeParams", "DeploymentTriggerImageChangeParams", 1) + `,`, + `}`, + }, "") + return s +} +func (this *ExecNewPodHook) String() string { + if this == nil { + return "nil" + } + repeatedStringForEnv := "[]EnvVar{" + for _, f := range this.Env { + repeatedStringForEnv += fmt.Sprintf("%v", f) + "," + } + repeatedStringForEnv += "}" + s := strings.Join([]string{`&ExecNewPodHook{`, + `Command:` + fmt.Sprintf("%v", this.Command) + `,`, + `Env:` + repeatedStringForEnv + `,`, + `ContainerName:` + fmt.Sprintf("%v", this.ContainerName) + `,`, + `Volumes:` + fmt.Sprintf("%v", this.Volumes) + `,`, + `}`, + }, "") + return s +} +func (this *LifecycleHook) String() string { + if this == nil { + return "nil" + } + repeatedStringForTagImages := "[]TagImageHook{" + for _, f := range this.TagImages { + repeatedStringForTagImages += strings.Replace(strings.Replace(f.String(), "TagImageHook", "TagImageHook", 1), `&`, ``, 1) + "," + } + repeatedStringForTagImages += "}" + s := strings.Join([]string{`&LifecycleHook{`, + `FailurePolicy:` + fmt.Sprintf("%v", this.FailurePolicy) + `,`, + `ExecNewPod:` + strings.Replace(this.ExecNewPod.String(), "ExecNewPodHook", "ExecNewPodHook", 1) + `,`, + `TagImages:` + repeatedStringForTagImages + `,`, + `}`, + }, "") + return s +} +func (this *RecreateDeploymentStrategyParams) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&RecreateDeploymentStrategyParams{`, + `TimeoutSeconds:` + valueToStringGenerated(this.TimeoutSeconds) + `,`, + `Pre:` + strings.Replace(this.Pre.String(), "LifecycleHook", "LifecycleHook", 1) + `,`, + `Mid:` + strings.Replace(this.Mid.String(), "LifecycleHook", "LifecycleHook", 1) + `,`, + `Post:` + strings.Replace(this.Post.String(), "LifecycleHook", "LifecycleHook", 1) + `,`, + `}`, + }, "") + return s +} +func (this *RollingDeploymentStrategyParams) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&RollingDeploymentStrategyParams{`, + `UpdatePeriodSeconds:` + valueToStringGenerated(this.UpdatePeriodSeconds) + `,`, + `IntervalSeconds:` + valueToStringGenerated(this.IntervalSeconds) + `,`, + `TimeoutSeconds:` + valueToStringGenerated(this.TimeoutSeconds) + `,`, + `MaxUnavailable:` + strings.Replace(fmt.Sprintf("%v", this.MaxUnavailable), "IntOrString", "intstr.IntOrString", 1) + `,`, + `MaxSurge:` + strings.Replace(fmt.Sprintf("%v", this.MaxSurge), "IntOrString", "intstr.IntOrString", 1) + `,`, + `Pre:` + strings.Replace(this.Pre.String(), "LifecycleHook", "LifecycleHook", 1) + `,`, + `Post:` + strings.Replace(this.Post.String(), "LifecycleHook", "LifecycleHook", 1) + `,`, + `}`, + }, "") + return s +} +func (this *TagImageHook) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&TagImageHook{`, + `ContainerName:` + fmt.Sprintf("%v", this.ContainerName) + `,`, + `To:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.To), "ObjectReference", "v1.ObjectReference", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func valueToStringGenerated(v interface{}) string { + rv := reflect.ValueOf(v) + if rv.IsNil() { + return "nil" + } + pv := reflect.Indirect(rv).Interface() + return fmt.Sprintf("*%v", pv) +} +func (m *CustomDeploymentStrategyParams) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + 
var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: CustomDeploymentStrategyParams: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: CustomDeploymentStrategyParams: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Image", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Image = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Environment", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Environment = append(m.Environment, v1.EnvVar{}) + if err := m.Environment[len(m.Environment)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Command", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Command = append(m.Command, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *DeploymentCause) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) 
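+			// The key just read is a varint encoding (field_number << 3) | wire_type.
+			// Its low three bits (extracted next) select the wire type (0 = varint,
+			// 2 = length-delimited); the remaining bits give the field number that
+			// drives the switch below. Wire type 4, the deprecated end-group marker,
+			// is rejected as malformed input.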
+ wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: DeploymentCause: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: DeploymentCause: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Type", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Type = DeploymentTriggerType(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ImageTrigger", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.ImageTrigger == nil { + m.ImageTrigger = &DeploymentCauseImageTrigger{} + } + if err := m.ImageTrigger.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *DeploymentCauseImageTrigger) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: DeploymentCauseImageTrigger: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: DeploymentCauseImageTrigger: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field From", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.From.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = 
preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *DeploymentCondition) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: DeploymentCondition: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: DeploymentCondition: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Type", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Type = DeploymentConditionType(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Status", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Status = k8s_io_api_core_v1.ConditionStatus(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field LastTransitionTime", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.LastTransitionTime.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Reason", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { 
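+					// A clear high bit marks the final byte of the varint: each byte
+					// contributes 7 bits of the value, least-significant group first.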
+ break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Reason = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Message", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Message = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 6: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field LastUpdateTime", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.LastUpdateTime.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *DeploymentConfig) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: DeploymentConfig: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: DeploymentConfig: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ObjectMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for 
field Spec", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Spec.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Status", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Status.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *DeploymentConfigList) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: DeploymentConfigList: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: DeploymentConfigList: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ListMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ListMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if 
postIndex > l { + return io.ErrUnexpectedEOF + } + m.Items = append(m.Items, DeploymentConfig{}) + if err := m.Items[len(m.Items)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *DeploymentConfigRollback) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: DeploymentConfigRollback: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: DeploymentConfigRollback: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Name = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field UpdatedAnnotations", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.UpdatedAnnotations == nil { + m.UpdatedAnnotations = make(map[string]string) + } + var mapkey string + var mapvalue string + for iNdEx < postIndex { + entryPreIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + if fieldNum == 1 { + var stringLenmapkey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapkey |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapkey := int(stringLenmapkey) + if intStringLenmapkey < 0 { + return ErrInvalidLengthGenerated + } + postStringIndexmapkey := iNdEx + intStringLenmapkey + if 
postStringIndexmapkey < 0 { + return ErrInvalidLengthGenerated + } + if postStringIndexmapkey > l { + return io.ErrUnexpectedEOF + } + mapkey = string(dAtA[iNdEx:postStringIndexmapkey]) + iNdEx = postStringIndexmapkey + } else if fieldNum == 2 { + var stringLenmapvalue uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapvalue |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapvalue := int(stringLenmapvalue) + if intStringLenmapvalue < 0 { + return ErrInvalidLengthGenerated + } + postStringIndexmapvalue := iNdEx + intStringLenmapvalue + if postStringIndexmapvalue < 0 { + return ErrInvalidLengthGenerated + } + if postStringIndexmapvalue > l { + return io.ErrUnexpectedEOF + } + mapvalue = string(dAtA[iNdEx:postStringIndexmapvalue]) + iNdEx = postStringIndexmapvalue + } else { + iNdEx = entryPreIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > postIndex { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + m.UpdatedAnnotations[mapkey] = mapvalue + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Spec", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Spec.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *DeploymentConfigRollbackSpec) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: DeploymentConfigRollbackSpec: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: DeploymentConfigRollbackSpec: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field From", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return 
ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.From.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Revision", wireType) + } + m.Revision = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Revision |= int64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 3: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field IncludeTriggers", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.IncludeTriggers = bool(v != 0) + case 4: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field IncludeTemplate", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.IncludeTemplate = bool(v != 0) + case 5: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field IncludeReplicationMeta", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.IncludeReplicationMeta = bool(v != 0) + case 6: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field IncludeStrategy", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.IncludeStrategy = bool(v != 0) + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *DeploymentConfigSpec) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: DeploymentConfigSpec: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: DeploymentConfigSpec: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Strategy", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return 
io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Strategy.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Triggers", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Triggers == nil { + m.Triggers = DeploymentTriggerPolicies{} + } + if err := m.Triggers.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Replicas", wireType) + } + m.Replicas = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Replicas |= int32(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 4: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field RevisionHistoryLimit", wireType) + } + var v int32 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int32(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.RevisionHistoryLimit = &v + case 5: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Test", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.Test = bool(v != 0) + case 6: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Paused", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.Paused = bool(v != 0) + case 7: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Selector", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Selector == nil { + m.Selector = make(map[string]string) + } + var mapkey string + var mapvalue string + for iNdEx < postIndex { + entryPreIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 
ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + if fieldNum == 1 { + var stringLenmapkey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapkey |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapkey := int(stringLenmapkey) + if intStringLenmapkey < 0 { + return ErrInvalidLengthGenerated + } + postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey < 0 { + return ErrInvalidLengthGenerated + } + if postStringIndexmapkey > l { + return io.ErrUnexpectedEOF + } + mapkey = string(dAtA[iNdEx:postStringIndexmapkey]) + iNdEx = postStringIndexmapkey + } else if fieldNum == 2 { + var stringLenmapvalue uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapvalue |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapvalue := int(stringLenmapvalue) + if intStringLenmapvalue < 0 { + return ErrInvalidLengthGenerated + } + postStringIndexmapvalue := iNdEx + intStringLenmapvalue + if postStringIndexmapvalue < 0 { + return ErrInvalidLengthGenerated + } + if postStringIndexmapvalue > l { + return io.ErrUnexpectedEOF + } + mapvalue = string(dAtA[iNdEx:postStringIndexmapvalue]) + iNdEx = postStringIndexmapvalue + } else { + iNdEx = entryPreIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > postIndex { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + m.Selector[mapkey] = mapvalue + iNdEx = postIndex + case 8: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Template", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Template == nil { + m.Template = &v1.PodTemplateSpec{} + } + if err := m.Template.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 9: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field MinReadySeconds", wireType) + } + m.MinReadySeconds = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.MinReadySeconds |= int32(b&0x7F) << shift + if b < 0x80 { + break + } + } + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *DeploymentConfigStatus) Unmarshal(dAtA []byte) error { + l := len(dAtA) 
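+	// Every generated Unmarshal follows the same shape: scan the buffer once,
+	// read a key varint per field, dispatch on its field number, and bounds-check
+	// each declared length against the remaining input. Unknown fields are
+	// skipped via skipGenerated rather than rejected, which keeps the encoding
+	// forward-compatible across API versions.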
+ iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: DeploymentConfigStatus: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: DeploymentConfigStatus: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field LatestVersion", wireType) + } + m.LatestVersion = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.LatestVersion |= int64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field ObservedGeneration", wireType) + } + m.ObservedGeneration = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.ObservedGeneration |= int64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 3: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Replicas", wireType) + } + m.Replicas = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Replicas |= int32(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 4: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field UpdatedReplicas", wireType) + } + m.UpdatedReplicas = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.UpdatedReplicas |= int32(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 5: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field AvailableReplicas", wireType) + } + m.AvailableReplicas = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.AvailableReplicas |= int32(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 6: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field UnavailableReplicas", wireType) + } + m.UnavailableReplicas = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.UnavailableReplicas |= int32(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 7: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Details", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { 
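+			// The embedded message claims more bytes than remain in the buffer:
+			// report the short read as io.ErrUnexpectedEOF instead of decoding garbage.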
+ return io.ErrUnexpectedEOF + } + if m.Details == nil { + m.Details = &DeploymentDetails{} + } + if err := m.Details.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 8: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Conditions", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Conditions = append(m.Conditions, DeploymentCondition{}) + if err := m.Conditions[len(m.Conditions)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 9: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field ReadyReplicas", wireType) + } + m.ReadyReplicas = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.ReadyReplicas |= int32(b&0x7F) << shift + if b < 0x80 { + break + } + } + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *DeploymentDetails) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: DeploymentDetails: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: DeploymentDetails: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Message", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Message = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Causes", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return 
ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Causes = append(m.Causes, DeploymentCause{}) + if err := m.Causes[len(m.Causes)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *DeploymentLog) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: DeploymentLog: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: DeploymentLog: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *DeploymentLogOptions) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: DeploymentLogOptions: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: DeploymentLogOptions: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Container", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Container = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Follow", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + 
break + } + } + m.Follow = bool(v != 0) + case 3: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Previous", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.Previous = bool(v != 0) + case 4: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field SinceSeconds", wireType) + } + var v int64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int64(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.SinceSeconds = &v + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field SinceTime", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.SinceTime == nil { + m.SinceTime = &v11.Time{} + } + if err := m.SinceTime.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 6: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Timestamps", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.Timestamps = bool(v != 0) + case 7: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field TailLines", wireType) + } + var v int64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int64(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.TailLines = &v + case 8: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field LimitBytes", wireType) + } + var v int64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int64(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.LimitBytes = &v + case 9: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field NoWait", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.NoWait = bool(v != 0) + case 10: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Version", wireType) + } + var v int64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int64(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.Version = &v + default: + iNdEx = preIndex + skippy, err := 
skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *DeploymentRequest) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: DeploymentRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: DeploymentRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Name = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Latest", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.Latest = bool(v != 0) + case 3: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Force", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.Force = bool(v != 0) + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ExcludeTriggers", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ExcludeTriggers = append(m.ExcludeTriggers, DeploymentTriggerType(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy 
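Every scalar field in these generated Unmarshal methods (the int64 and int32 counters, the bools, and the tag keys themselves) is decoded by the same unrolled base-128 varint loop. A minimal standalone sketch of that loop follows, assuming nothing beyond the standard library; the decodeVarint helper name is illustrative, since the generator inlines the loop per field rather than calling out to a helper:

```go
package main

import (
	"errors"
	"fmt"
	"io"
)

var errIntOverflow = errors.New("proto: integer overflow")

// decodeVarint reads one base-128 varint starting at offset and returns the
// decoded value together with the offset of the first byte after it.
func decodeVarint(data []byte, offset int) (uint64, int, error) {
	var v uint64
	for shift := uint(0); ; shift += 7 {
		if shift >= 64 {
			// More than ten continuation bytes cannot fit in a uint64.
			return 0, 0, errIntOverflow
		}
		if offset >= len(data) {
			return 0, 0, io.ErrUnexpectedEOF
		}
		b := data[offset]
		offset++
		v |= uint64(b&0x7F) << shift // the low seven bits carry payload
		if b < 0x80 {                // a clear high bit marks the final byte
			return v, offset, nil
		}
	}
}

func main() {
	// 300 encodes as 0xAC 0x02: 44 + (2 << 7).
	v, next, err := decodeVarint([]byte{0xAC, 0x02}, 0)
	fmt.Println(v, next, err) // 300 2 <nil>
}
```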
+ } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *DeploymentStrategy) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: DeploymentStrategy: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: DeploymentStrategy: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Type", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Type = DeploymentStrategyType(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field CustomParams", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.CustomParams == nil { + m.CustomParams = &CustomDeploymentStrategyParams{} + } + if err := m.CustomParams.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field RecreateParams", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.RecreateParams == nil { + m.RecreateParams = &RecreateDeploymentStrategyParams{} + } + if err := m.RecreateParams.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field RollingParams", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } 
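The wire value decoded at the top of each loop iteration packs two things into one varint: the low three bits hold the wire type and the remaining bits hold the field number, which is why every method computes fieldNum := int32(wire >> 3) and wireType := int(wire & 0x7). A small sketch of the packing side (the key helper and the constant names are illustrative, not part of the vendored code):

```go
package main

import "fmt"

// Protobuf wire types as used throughout the generated switch statements.
const (
	wireVarint  = 0 // ints and bools
	wireFixed64 = 1
	wireBytes   = 2 // strings, nested messages, map entries
	wireFixed32 = 5
)

// key packs a field number and wire type exactly the way the generated code
// unpacks them with wire >> 3 and wire & 0x7.
func key(fieldNum, wireType uint64) uint64 {
	return fieldNum<<3 | wireType
}

func main() {
	// Field 7 ("details" in DeploymentConfigStatus) is length-delimited.
	k := key(7, wireBytes)
	fmt.Printf("key=%#x fieldNum=%d wireType=%d\n", k, k>>3, k&0x7)
	// Output: key=0x3a fieldNum=7 wireType=2
}
```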
+ if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.RollingParams == nil { + m.RollingParams = &RollingDeploymentStrategyParams{} + } + if err := m.RollingParams.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Resources", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Resources.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 6: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Labels", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Labels == nil { + m.Labels = make(map[string]string) + } + var mapkey string + var mapvalue string + for iNdEx < postIndex { + entryPreIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + if fieldNum == 1 { + var stringLenmapkey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapkey |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapkey := int(stringLenmapkey) + if intStringLenmapkey < 0 { + return ErrInvalidLengthGenerated + } + postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey < 0 { + return ErrInvalidLengthGenerated + } + if postStringIndexmapkey > l { + return io.ErrUnexpectedEOF + } + mapkey = string(dAtA[iNdEx:postStringIndexmapkey]) + iNdEx = postStringIndexmapkey + } else if fieldNum == 2 { + var stringLenmapvalue uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapvalue |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapvalue := int(stringLenmapvalue) + if intStringLenmapvalue < 0 { + return ErrInvalidLengthGenerated + } + postStringIndexmapvalue := iNdEx + intStringLenmapvalue + if postStringIndexmapvalue < 0 { + return ErrInvalidLengthGenerated + } + if postStringIndexmapvalue > l { + return io.ErrUnexpectedEOF + } + mapvalue = string(dAtA[iNdEx:postStringIndexmapvalue]) + iNdEx = postStringIndexmapvalue + } else { + iNdEx = entryPreIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx 
+ skippy) > postIndex { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + m.Labels[mapkey] = mapvalue + iNdEx = postIndex + case 7: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Annotations", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Annotations == nil { + m.Annotations = make(map[string]string) + } + var mapkey string + var mapvalue string + for iNdEx < postIndex { + entryPreIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + if fieldNum == 1 { + var stringLenmapkey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapkey |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapkey := int(stringLenmapkey) + if intStringLenmapkey < 0 { + return ErrInvalidLengthGenerated + } + postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey < 0 { + return ErrInvalidLengthGenerated + } + if postStringIndexmapkey > l { + return io.ErrUnexpectedEOF + } + mapkey = string(dAtA[iNdEx:postStringIndexmapkey]) + iNdEx = postStringIndexmapkey + } else if fieldNum == 2 { + var stringLenmapvalue uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapvalue |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapvalue := int(stringLenmapvalue) + if intStringLenmapvalue < 0 { + return ErrInvalidLengthGenerated + } + postStringIndexmapvalue := iNdEx + intStringLenmapvalue + if postStringIndexmapvalue < 0 { + return ErrInvalidLengthGenerated + } + if postStringIndexmapvalue > l { + return io.ErrUnexpectedEOF + } + mapvalue = string(dAtA[iNdEx:postStringIndexmapvalue]) + iNdEx = postStringIndexmapvalue + } else { + iNdEx = entryPreIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > postIndex { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + m.Annotations[mapkey] = mapvalue + iNdEx = postIndex + case 8: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field ActiveDeadlineSeconds", wireType) + } + var v int64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int64(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.ActiveDeadlineSeconds = &v + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } + if 
(iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *DeploymentTriggerImageChangeParams) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: DeploymentTriggerImageChangeParams: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: DeploymentTriggerImageChangeParams: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Automatic", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.Automatic = bool(v != 0) + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ContainerNames", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ContainerNames = append(m.ContainerNames, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field From", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.From.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field LastTriggeredImage", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.LastTriggeredImage = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return 
ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *DeploymentTriggerPolicies) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: DeploymentTriggerPolicies: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: DeploymentTriggerPolicies: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + *m = append(*m, DeploymentTriggerPolicy{}) + if err := (*m)[len(*m)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *DeploymentTriggerPolicy) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: DeploymentTriggerPolicy: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: DeploymentTriggerPolicy: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Type", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Type = DeploymentTriggerType(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return 
fmt.Errorf("proto: wrong wireType = %d for field ImageChangeParams", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.ImageChangeParams == nil { + m.ImageChangeParams = &DeploymentTriggerImageChangeParams{} + } + if err := m.ImageChangeParams.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ExecNewPodHook) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ExecNewPodHook: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ExecNewPodHook: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Command", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Command = append(m.Command, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Env", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Env = append(m.Env, v1.EnvVar{}) + if err := m.Env[len(m.Env)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ContainerName", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b 
:= dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ContainerName = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Volumes", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Volumes = append(m.Volumes, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *LifecycleHook) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: LifecycleHook: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: LifecycleHook: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field FailurePolicy", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.FailurePolicy = LifecycleHookFailurePolicy(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ExecNewPod", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.ExecNewPod == nil { + m.ExecNewPod = &ExecNewPodHook{} 
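Length-delimited fields (wire type 2) such as ExecNewPod above all follow one shape: decode a length prefix, bounds-check the resulting window, lazily allocate the target if it is nil, and recurse into the sub-slice. A condensed sketch of that shape; Child and readChild are stand-in names, and the length prefix is read as a single byte, so this toy version only handles payloads shorter than 128 bytes:

```go
package main

import (
	"fmt"
	"io"
)

// Child stands in for any nested message type, e.g. ExecNewPodHook.
type Child struct{ Raw []byte }

func (c *Child) Unmarshal(data []byte) error { c.Raw = data; return nil }

// readChild mirrors the generated wire-type-2 handling: length prefix,
// bounds checks, lazy allocation, then Unmarshal on the sub-slice.
func readChild(dst **Child, data []byte, offset int) (int, error) {
	if offset >= len(data) {
		return 0, io.ErrUnexpectedEOF
	}
	msglen := int(data[offset]) // single-byte varint; the real code loops
	offset++
	post := offset + msglen
	if post > len(data) { // truncated input
		return 0, io.ErrUnexpectedEOF
	}
	if *dst == nil { // mirrors `if m.ExecNewPod == nil { ... }`
		*dst = &Child{}
	}
	if err := (*dst).Unmarshal(data[offset:post]); err != nil {
		return 0, err
	}
	return post, nil
}

func main() {
	var c *Child
	// Length prefix 3 followed by three payload bytes.
	next, err := readChild(&c, []byte{0x03, 'a', 'b', 'c'}, 0)
	fmt.Println(string(c.Raw), next, err) // abc 4 <nil>
}
```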
+ } + if err := m.ExecNewPod.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field TagImages", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.TagImages = append(m.TagImages, TagImageHook{}) + if err := m.TagImages[len(m.TagImages)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *RecreateDeploymentStrategyParams) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: RecreateDeploymentStrategyParams: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: RecreateDeploymentStrategyParams: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field TimeoutSeconds", wireType) + } + var v int64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int64(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.TimeoutSeconds = &v + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Pre", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Pre == nil { + m.Pre = &LifecycleHook{} + } + if err := m.Pre.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Mid", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx 
+ msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Mid == nil { + m.Mid = &LifecycleHook{} + } + if err := m.Mid.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Post", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Post == nil { + m.Post = &LifecycleHook{} + } + if err := m.Post.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *RollingDeploymentStrategyParams) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: RollingDeploymentStrategyParams: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: RollingDeploymentStrategyParams: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field UpdatePeriodSeconds", wireType) + } + var v int64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int64(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.UpdatePeriodSeconds = &v + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field IntervalSeconds", wireType) + } + var v int64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int64(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.IntervalSeconds = &v + case 3: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field TimeoutSeconds", wireType) + } + var v int64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int64(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.TimeoutSeconds = &v + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field MaxUnavailable", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } 
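Optional scalar fields such as TimeoutSeconds, UpdatePeriodSeconds, and IntervalSeconds decode into a local variable and then store its address (m.TimeoutSeconds = &v), so an absent field (a nil pointer) stays distinguishable from an explicit zero. A minimal illustration of that convention; the params type and setTimeout helper are made up for the example:

```go
package main

import "fmt"

// params mimics the pointer-typed optional fields in the structs above.
type params struct {
	TimeoutSeconds *int64
}

// setTimeout copies the decoded value and stores its address, as the
// generated `v := ...; m.TimeoutSeconds = &v` sequence does.
func (p *params) setTimeout(v int64) { p.TimeoutSeconds = &v }

func main() {
	var absent, explicitZero params
	explicitZero.setTimeout(0) // set to zero, which differs from unset
	fmt.Println(absent.TimeoutSeconds == nil)       // true: field never seen
	fmt.Println(explicitZero.TimeoutSeconds == nil) // false: zero was decoded
}
```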
+ if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.MaxUnavailable == nil { + m.MaxUnavailable = &intstr.IntOrString{} + } + if err := m.MaxUnavailable.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field MaxSurge", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.MaxSurge == nil { + m.MaxSurge = &intstr.IntOrString{} + } + if err := m.MaxSurge.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 7: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Pre", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Pre == nil { + m.Pre = &LifecycleHook{} + } + if err := m.Pre.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 8: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Post", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Post == nil { + m.Post = &LifecycleHook{} + } + if err := m.Post.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *TagImageHook) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 
0x7) + if wireType == 4 { + return fmt.Errorf("proto: TagImageHook: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: TagImageHook: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ContainerName", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ContainerName = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field To", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.To.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func skipGenerated(dAtA []byte) (n int, err error) { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowGenerated + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + wireType := int(wire & 0x7) + switch wireType { + case 0: + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowGenerated + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + iNdEx++ + if dAtA[iNdEx-1] < 0x80 { + break + } + } + return iNdEx, nil + case 1: + iNdEx += 8 + return iNdEx, nil + case 2: + var length int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowGenerated + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + length |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if length < 0 { + return 0, ErrInvalidLengthGenerated + } + iNdEx += length + if iNdEx < 0 { + return 0, ErrInvalidLengthGenerated + } + return iNdEx, nil + case 3: + for { + var innerWire uint64 + var start int = iNdEx + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowGenerated + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + innerWire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + innerWireType := int(innerWire & 0x7) + if innerWireType == 4 { + break + } + next, err := 
skipGenerated(dAtA[start:]) + if err != nil { + return 0, err + } + iNdEx = start + next + if iNdEx < 0 { + return 0, ErrInvalidLengthGenerated + } + } + return iNdEx, nil + case 4: + return iNdEx, nil + case 5: + iNdEx += 4 + return iNdEx, nil + default: + return 0, fmt.Errorf("proto: illegal wireType %d", wireType) + } + } + panic("unreachable") +} + +var ( + ErrInvalidLengthGenerated = fmt.Errorf("proto: negative length found during unmarshaling") + ErrIntOverflowGenerated = fmt.Errorf("proto: integer overflow") +) diff --git a/vendor/github.com/openshift/api/apps/v1/generated.proto b/vendor/github.com/openshift/api/apps/v1/generated.proto new file mode 100644 index 0000000000000000000000000000000000000000..d15f20c0d4f0c09b58dc631e113388c1869ce52b --- /dev/null +++ b/vendor/github.com/openshift/api/apps/v1/generated.proto @@ -0,0 +1,466 @@ + +// This file was autogenerated by go-to-protobuf. Do not edit it manually! + +syntax = 'proto2'; + +package github.com.openshift.api.apps.v1; + +import "k8s.io/api/core/v1/generated.proto"; +import "k8s.io/apimachinery/pkg/apis/meta/v1/generated.proto"; +import "k8s.io/apimachinery/pkg/runtime/schema/generated.proto"; +import "k8s.io/apimachinery/pkg/util/intstr/generated.proto"; + +// Package-wide variables from generator "generated". +option go_package = "v1"; + +// CustomDeploymentStrategyParams are the input to the Custom deployment strategy. +message CustomDeploymentStrategyParams { + // Image specifies a container image which can carry out a deployment. + optional string image = 1; + + // Environment holds the environment which will be given to the container for Image. + repeated k8s.io.api.core.v1.EnvVar environment = 2; + + // Command is optional and overrides CMD in the container Image. + repeated string command = 3; +} + +// DeploymentCause captures information about a particular cause of a deployment. +message DeploymentCause { + // Type of the trigger that resulted in the creation of a new deployment + optional string type = 1; + + // ImageTrigger contains the image trigger details, if this trigger was fired based on an image change + optional DeploymentCauseImageTrigger imageTrigger = 2; +} + +// DeploymentCauseImageTrigger represents details about the cause of a deployment originating +// from an image change trigger +message DeploymentCauseImageTrigger { + // From is a reference to the changed object which triggered a deployment. The field may have + // the kinds DockerImage, ImageStreamTag, or ImageStreamImage. + optional k8s.io.api.core.v1.ObjectReference from = 1; +} + +// DeploymentCondition describes the state of a deployment config at a certain point. +message DeploymentCondition { + // Type of deployment condition. + optional string type = 1; + + // Status of the condition, one of True, False, Unknown. + optional string status = 2; + + // The last time this condition was updated. + optional k8s.io.apimachinery.pkg.apis.meta.v1.Time lastUpdateTime = 6; + + // The last time the condition transitioned from one status to another. + optional k8s.io.apimachinery.pkg.apis.meta.v1.Time lastTransitionTime = 3; + + // The reason for the condition's last transition. + optional string reason = 4; + + // A human readable message indicating details about the transition. + optional string message = 5; +} + +// Deployment Configs define the template for a pod and manage deploying new images or configuration changes. +// A single deployment configuration is usually analogous to a single micro-service.
It can support many different +// deployment patterns, including full restart, customizable rolling updates, and fully custom behaviors, as +// well as pre- and post- deployment hooks. Each individual deployment is represented as a replication controller. + +// A deployment is "triggered" when its configuration is changed or a tag in an Image Stream is changed. +// Triggers can be disabled to allow manual control over a deployment. The "strategy" determines how the deployment +// is carried out and may be changed at any time. The `latestVersion` field is updated when a new deployment +// is triggered by any means. +message DeploymentConfig { + optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; + + // Spec represents a desired deployment state and how to deploy to it. + optional DeploymentConfigSpec spec = 2; + + // Status represents the current deployment state. + // +optional + optional DeploymentConfigStatus status = 3; +} + +// DeploymentConfigList is a collection of deployment configs. +message DeploymentConfigList { + optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; + + // Items is a list of deployment configs + repeated DeploymentConfig items = 2; +} + +// DeploymentConfigRollback provides the input to rollback generation. +message DeploymentConfigRollback { + // Name of the deployment config that will be rolled back. + optional string name = 1; + + // UpdatedAnnotations is a set of new annotations that will be added in the deployment config. + map<string, string> updatedAnnotations = 2; + + // Spec defines the options to rollback generation. + optional DeploymentConfigRollbackSpec spec = 3; +} + +// DeploymentConfigRollbackSpec represents the options for rollback generation. +message DeploymentConfigRollbackSpec { + // From points to a ReplicationController which is a deployment. + optional k8s.io.api.core.v1.ObjectReference from = 1; + + // Revision to rollback to. If set to 0, rollback to the last revision. + optional int64 revision = 2; + + // IncludeTriggers specifies whether to include config Triggers. + optional bool includeTriggers = 3; + + // IncludeTemplate specifies whether to include the PodTemplateSpec. + optional bool includeTemplate = 4; + + // IncludeReplicationMeta specifies whether to include the replica count and selector. + optional bool includeReplicationMeta = 5; + + // IncludeStrategy specifies whether to include the deployment Strategy. + optional bool includeStrategy = 6; +} + +// DeploymentConfigSpec represents the desired state of the deployment. +message DeploymentConfigSpec { + // Strategy describes how a deployment is executed. + // +optional + optional DeploymentStrategy strategy = 1; + + // MinReadySeconds is the minimum number of seconds for which a newly created pod should + // be ready without any of its containers crashing, for it to be considered available. + // Defaults to 0 (pod will be considered available as soon as it is ready) + optional int32 minReadySeconds = 9; + + // Triggers determine how updates to a DeploymentConfig result in new deployments. If no triggers + // are defined, a new deployment can only occur as a result of an explicit client update to the + // DeploymentConfig with a new LatestVersion. If null, defaults to having a config change trigger. + // +optional + optional DeploymentTriggerPolicies triggers = 2; + + // Replicas is the number of desired replicas.
+ // +optional + optional int32 replicas = 3; + + // RevisionHistoryLimit is the number of old ReplicationControllers to retain to allow for rollbacks. + // This field is a pointer to allow for differentiation between an explicit zero and not specified. + // Defaults to 10. (This only applies to DeploymentConfigs created via the new group API resource, not the legacy resource.) + optional int32 revisionHistoryLimit = 4; + + // Test ensures that this deployment config will have zero replicas except while a deployment is running. This allows the + // deployment config to be used as a continuous deployment test - triggering on images, running the deployment, and then succeeding + // or failing. Post strategy hooks and After actions can be used to integrate successful deployment with an action. + // +optional + optional bool test = 5; + + // Paused indicates that the deployment config is paused resulting in no new deployments on template + // changes or changes in the template caused by other triggers. + optional bool paused = 6; + + // Selector is a label query over pods that should match the Replicas count. + map<string, string> selector = 7; + + // Template is the object that describes the pod that will be created if + // insufficient replicas are detected. + optional k8s.io.api.core.v1.PodTemplateSpec template = 8; +} + +// DeploymentConfigStatus represents the current deployment state. +message DeploymentConfigStatus { + // LatestVersion is used to determine whether the current deployment associated with a deployment + // config is out of sync. + optional int64 latestVersion = 1; + + // ObservedGeneration is the most recent generation observed by the deployment config controller. + optional int64 observedGeneration = 2; + + // Replicas is the total number of pods targeted by this deployment config. + optional int32 replicas = 3; + + // UpdatedReplicas is the total number of non-terminated pods targeted by this deployment config + // that have the desired template spec. + optional int32 updatedReplicas = 4; + + // AvailableReplicas is the total number of available pods targeted by this deployment config. + optional int32 availableReplicas = 5; + + // UnavailableReplicas is the total number of unavailable pods targeted by this deployment config. + optional int32 unavailableReplicas = 6; + + // Details are the reasons for the update to this deployment config. + // This could be based on a change made by the user or caused by an automatic trigger + optional DeploymentDetails details = 7; + + // Conditions represents the latest available observations of a deployment config's current state. + // +patchMergeKey=type + // +patchStrategy=merge + repeated DeploymentCondition conditions = 8; + + // Total number of ready pods targeted by this deployment. + optional int32 readyReplicas = 9; +} + +// DeploymentDetails captures information about the causes of a deployment. +message DeploymentDetails { + // Message is the user specified change message, if this deployment was triggered manually by the user + optional string message = 1; + + // Causes are extended data associated with all the causes for creating a new deployment + repeated DeploymentCause causes = 2; +} + +// DeploymentLog represents the logs for a deployment +message DeploymentLog { +} + +// DeploymentLogOptions is the REST options for a deployment log +message DeploymentLogOptions { + // The container for which to stream logs. Defaults to the only container if there is one container in the pod.
+ optional string container = 1; + + // Follow if true indicates that the build log should be streamed until + // the build terminates. + optional bool follow = 2; + + // Return previous deployment logs. Defaults to false. + optional bool previous = 3; + + // A relative time in seconds before the current time from which to show logs. If this value + // precedes the time a pod was started, only logs since the pod start will be returned. + // If this value is in the future, no logs will be returned. + // Only one of sinceSeconds or sinceTime may be specified. + optional int64 sinceSeconds = 4; + + // An RFC3339 timestamp from which to show logs. If this value + // precedes the time a pod was started, only logs since the pod start will be returned. + // If this value is in the future, no logs will be returned. + // Only one of sinceSeconds or sinceTime may be specified. + optional k8s.io.apimachinery.pkg.apis.meta.v1.Time sinceTime = 5; + + // If true, add an RFC3339 or RFC3339Nano timestamp at the beginning of every line + // of log output. Defaults to false. + optional bool timestamps = 6; + + // If set, the number of lines from the end of the logs to show. If not specified, + // logs are shown from the creation of the container or sinceSeconds or sinceTime + optional int64 tailLines = 7; + + // If set, the number of bytes to read from the server before terminating the + // log output. This may not display a complete final line of logging, and may return + // slightly more or slightly less than the specified limit. + optional int64 limitBytes = 8; + + // NoWait if true causes the call to return immediately even if the deployment + // is not available yet. Otherwise the server will wait until the deployment has started. + // TODO: Fix the tag to 'noWait' in v2 + optional bool nowait = 9; + + // Version of the deployment for which to view logs. + optional int64 version = 10; +} + +// DeploymentRequest is a request to a deployment config for a new deployment. +message DeploymentRequest { + // Name of the deployment config for requesting a new deployment. + optional string name = 1; + + // Latest will update the deployment config with the latest state from all triggers. + optional bool latest = 2; + + // Force will try to force a new deployment to run. If the deployment config is paused, + // then setting this to true will return an Invalid error. + optional bool force = 3; + + // ExcludeTriggers instructs the instantiator to avoid processing the specified triggers. + // This field overrides the triggers from latest and allows clients to control specific + // logic. This field is ignored if not specified. + repeated string excludeTriggers = 4; +} + +// DeploymentStrategy describes how to perform a deployment. +message DeploymentStrategy { + // Type is the name of a deployment strategy. + optional string type = 1; + + // CustomParams are the input to the Custom deployment strategy, and may also + // be specified for the Recreate and Rolling strategies to customize the execution + // process that runs the deployment. + optional CustomDeploymentStrategyParams customParams = 2; + + // RecreateParams are the input to the Recreate deployment strategy. + optional RecreateDeploymentStrategyParams recreateParams = 3; + + // RollingParams are the input to the Rolling deployment strategy. + optional RollingDeploymentStrategyParams rollingParams = 4; + + // Resources contains resource requirements to execute the deployment and any hooks. 
+ optional k8s.io.api.core.v1.ResourceRequirements resources = 5; + + // Labels is a set of key, value pairs added to custom deployer and lifecycle pre/post hook pods. + map<string, string> labels = 6; + + // Annotations is a set of key, value pairs added to custom deployer and lifecycle pre/post hook pods. + map<string, string> annotations = 7; + + // ActiveDeadlineSeconds is the duration in seconds that the deployer pods for this deployment + // config may be active on a node before the system actively tries to terminate them. + optional int64 activeDeadlineSeconds = 8; +} + +// DeploymentTriggerImageChangeParams represents the parameters to the ImageChange trigger. +message DeploymentTriggerImageChangeParams { + // Automatic means that the detection of a new tag value should result in an image update + // inside the pod template. + optional bool automatic = 1; + + // ContainerNames is used to restrict tag updates to the specified set of container names in a pod. + // If multiple triggers point to the same containers, the resulting behavior is undefined. Future + // API versions will make this a validation error. If ContainerNames does not point to a valid container, + // the trigger will be ignored. Future API versions will make this a validation error. + repeated string containerNames = 2; + + // From is a reference to an image stream tag to watch for changes. From.Name is the only + // required subfield - if From.Namespace is blank, the namespace of the current deployment + // trigger will be used. + optional k8s.io.api.core.v1.ObjectReference from = 3; + + // LastTriggeredImage is the last image to be triggered. + optional string lastTriggeredImage = 4; +} + +// DeploymentTriggerPolicies is a list of policies where nil values are different from empty arrays. +// +protobuf.nullable=true +// +protobuf.options.(gogoproto.goproto_stringer)=false +message DeploymentTriggerPolicies { + // items, if empty, will result in an empty slice + + repeated DeploymentTriggerPolicy items = 1; +} + +// DeploymentTriggerPolicy describes a policy for a single trigger that results in a new deployment. +message DeploymentTriggerPolicy { + // Type of the trigger + optional string type = 1; + + // ImageChangeParams represents the parameters for the ImageChange trigger. + optional DeploymentTriggerImageChangeParams imageChangeParams = 2; +} + +// ExecNewPodHook is a hook implementation which runs a command in a new pod +// based on the specified container which is assumed to be part of the +// deployment template. +message ExecNewPodHook { + // Command is the action command and its arguments. + repeated string command = 1; + + // Env is a set of environment variables to supply to the hook pod's container. + repeated k8s.io.api.core.v1.EnvVar env = 2; + + // ContainerName is the name of a container in the deployment pod template + // whose container image will be used for the hook pod's container. + optional string containerName = 3; + + // Volumes is a list of named volumes from the pod template which should be + // copied to the hook pod. Volume names not found in pod spec are ignored. + // An empty list means no volumes will be copied. + repeated string volumes = 4; +} + +// LifecycleHook defines a specific deployment lifecycle action. Only one type of action may be specified at any time. +message LifecycleHook { + // FailurePolicy specifies what action to take if the hook fails. + optional string failurePolicy = 1; + + // ExecNewPod specifies the options for a lifecycle hook backed by a pod.
+ optional ExecNewPodHook execNewPod = 2; + + // TagImages instructs the deployer to tag the current image referenced under a container onto an image stream tag. + repeated TagImageHook tagImages = 3; +} + +// RecreateDeploymentStrategyParams are the input to the Recreate deployment +// strategy. +message RecreateDeploymentStrategyParams { + // TimeoutSeconds is the time to wait for updates before giving up. If the + // value is nil, a default will be used. + optional int64 timeoutSeconds = 1; + + // Pre is a lifecycle hook which is executed before the strategy manipulates + // the deployment. All LifecycleHookFailurePolicy values are supported. + optional LifecycleHook pre = 2; + + // Mid is a lifecycle hook which is executed while the deployment is scaled down to zero before the first new + // pod is created. All LifecycleHookFailurePolicy values are supported. + optional LifecycleHook mid = 3; + + // Post is a lifecycle hook which is executed after the strategy has + // finished all deployment logic. All LifecycleHookFailurePolicy values are supported. + optional LifecycleHook post = 4; +} + +// RollingDeploymentStrategyParams are the input to the Rolling deployment +// strategy. +message RollingDeploymentStrategyParams { + // UpdatePeriodSeconds is the time to wait between individual pod updates. + // If the value is nil, a default will be used. + optional int64 updatePeriodSeconds = 1; + + // IntervalSeconds is the time to wait between polling deployment status + // after update. If the value is nil, a default will be used. + optional int64 intervalSeconds = 2; + + // TimeoutSeconds is the time to wait for updates before giving up. If the + // value is nil, a default will be used. + optional int64 timeoutSeconds = 3; + + // MaxUnavailable is the maximum number of pods that can be unavailable + // during the update. Value can be an absolute number (ex: 5) or a + // percentage of total pods at the start of update (ex: 10%). Absolute + // number is calculated from percentage by rounding down. + // + // This cannot be 0 if MaxSurge is 0. By default, 25% is used. + // + // Example: when this is set to 30%, the old RC can be scaled down by 30% + // immediately when the rolling update starts. Once new pods are ready, old + // RC can be scaled down further, followed by scaling up the new RC, + // ensuring that at least 70% of the original number of pods are available at + // all times during the update. + optional k8s.io.apimachinery.pkg.util.intstr.IntOrString maxUnavailable = 4; + + // MaxSurge is the maximum number of pods that can be scheduled above the + // original number of pods. Value can be an absolute number (ex: 5) or a + // percentage of total pods at the start of the update (ex: 10%). Absolute + // number is calculated from percentage by rounding up. + // + // This cannot be 0 if MaxUnavailable is 0. By default, 25% is used. + // + // Example: when this is set to 30%, the new RC can be scaled up by 30% + // immediately when the rolling update starts. Once old pods have been + // killed, new RC can be scaled up further, ensuring that total number of + // pods running at any time during the update is at most 130% of the original + // pods. + optional k8s.io.apimachinery.pkg.util.intstr.IntOrString maxSurge = 5; + + // Pre is a lifecycle hook which is executed before the deployment process + // begins. All LifecycleHookFailurePolicy values are supported.
+ optional LifecycleHook pre = 7; + + // Post is a lifecycle hook which is executed after the strategy has + // finished all deployment logic. All LifecycleHookFailurePolicy values + // are supported. + optional LifecycleHook post = 8; +} + +// TagImageHook is a request to tag the image in a particular container onto an ImageStreamTag. +message TagImageHook { + // ContainerName is the name of a container in the deployment config whose image value will be used as the source of the tag. If there is only a single + // container this value will be defaulted to the name of that container. + optional string containerName = 1; + + // To is the target ImageStreamTag to set the container's image onto. + optional k8s.io.api.core.v1.ObjectReference to = 2; +} + diff --git a/vendor/github.com/openshift/api/apps/v1/legacy.go b/vendor/github.com/openshift/api/apps/v1/legacy.go new file mode 100644 index 0000000000000000000000000000000000000000..c8fa0ed999f39264e9cee06fedbb3e5c88c8358d --- /dev/null +++ b/vendor/github.com/openshift/api/apps/v1/legacy.go @@ -0,0 +1,28 @@ +package v1 + +import ( + corev1 "k8s.io/api/core/v1" + extensionsv1beta1 "k8s.io/api/extensions/v1beta1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" +) + +var ( + legacyGroupVersion = schema.GroupVersion{Group: "", Version: "v1"} + legacySchemeBuilder = runtime.NewSchemeBuilder(addLegacyKnownTypes, corev1.AddToScheme, extensionsv1beta1.AddToScheme) + DeprecatedInstallWithoutGroup = legacySchemeBuilder.AddToScheme +) + +func addLegacyKnownTypes(scheme *runtime.Scheme) error { + types := []runtime.Object{ + &DeploymentConfig{}, + &DeploymentConfigList{}, + &DeploymentConfigRollback{}, + &DeploymentRequest{}, + &DeploymentLog{}, + &DeploymentLogOptions{}, + &extensionsv1beta1.Scale{}, + } + scheme.AddKnownTypes(legacyGroupVersion, types...) + return nil +} diff --git a/vendor/github.com/openshift/api/apps/v1/register.go b/vendor/github.com/openshift/api/apps/v1/register.go new file mode 100644 index 0000000000000000000000000000000000000000..0c1e47e6d46ef99d9f783d8f6572e5d6133fa1ad --- /dev/null +++ b/vendor/github.com/openshift/api/apps/v1/register.go @@ -0,0 +1,45 @@ +package v1 + +import ( + corev1 "k8s.io/api/core/v1" + extensionsv1beta1 "k8s.io/api/extensions/v1beta1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" +) + +var ( + GroupName = "apps.openshift.io" + GroupVersion = schema.GroupVersion{Group: GroupName, Version: "v1"} + schemeBuilder = runtime.NewSchemeBuilder(addKnownTypes, corev1.AddToScheme, extensionsv1beta1.AddToScheme) + // Install is a function which adds this version to a scheme + Install = schemeBuilder.AddToScheme + + // SchemeGroupVersion generated code relies on this name + // Deprecated + SchemeGroupVersion = GroupVersion + // AddToScheme exists solely to keep the old generators creating valid code + // DEPRECATED + AddToScheme = schemeBuilder.AddToScheme +) + +// Resource generated code relies on this being here, but it logically belongs to the group +// DEPRECATED +func Resource(resource string) schema.GroupResource { + return schema.GroupResource{Group: GroupName, Resource: resource} +} + +// Adds the list of known types to api.Scheme. 
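For consumers of this vendored package, register.go exposes Install for the group-qualified apps.openshift.io/v1 API, while legacy.go above exposes DeprecatedInstallWithoutGroup for the pre-group "/v1" kinds. A minimal sketch of wiring both into a scheme follows; the main function and printed check are illustrative only:

package main

import (
	"fmt"

	appsv1 "github.com/openshift/api/apps/v1"
	"k8s.io/apimachinery/pkg/runtime"
)

func main() {
	scheme := runtime.NewScheme()

	// Register the group-qualified apps.openshift.io/v1 kinds.
	if err := appsv1.Install(scheme); err != nil {
		panic(err)
	}
	// Also register the legacy, group-less kinds; only needed when
	// decoding objects written against the pre-group "/v1" API.
	if err := appsv1.DeprecatedInstallWithoutGroup(scheme); err != nil {
		panic(err)
	}

	fmt.Println(scheme.Recognizes(appsv1.GroupVersion.WithKind("DeploymentConfig"))) // true
}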
+func addKnownTypes(scheme *runtime.Scheme) error { + scheme.AddKnownTypes(GroupVersion, + &DeploymentConfig{}, + &DeploymentConfigList{}, + &DeploymentConfigRollback{}, + &DeploymentRequest{}, + &DeploymentLog{}, + &DeploymentLogOptions{}, + &extensionsv1beta1.Scale{}, + ) + metav1.AddToGroupVersion(scheme, GroupVersion) + return nil +} diff --git a/vendor/github.com/openshift/api/apps/v1/types.go b/vendor/github.com/openshift/api/apps/v1/types.go new file mode 100644 index 0000000000000000000000000000000000000000..ed147807d08d31d69f56f72f11b40d2979488faa --- /dev/null +++ b/vendor/github.com/openshift/api/apps/v1/types.go @@ -0,0 +1,493 @@ +package v1 + +import ( + "fmt" + + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/util/intstr" +) + +// +genclient +// +genclient:method=Instantiate,verb=create,subresource=instantiate,input=DeploymentRequest +// +genclient:method=Rollback,verb=create,subresource=rollback,input=DeploymentConfigRollback +// +genclient:method=GetScale,verb=get,subresource=scale,result=k8s.io/api/extensions/v1beta1.Scale +// +genclient:method=UpdateScale,verb=update,subresource=scale,input=k8s.io/api/extensions/v1beta1.Scale,result=k8s.io/api/extensions/v1beta1.Scale +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// Deployment Configs define the template for a pod and manage deploying new images or configuration changes. +// A single deployment configuration is usually analogous to a single micro-service. It can support many different +// deployment patterns, including full restart, customizable rolling updates, and fully custom behaviors, as +// well as pre- and post- deployment hooks. Each individual deployment is represented as a replication controller. +// +// A deployment is "triggered" when its configuration is changed or a tag in an Image Stream is changed. +// Triggers can be disabled to allow manual control over a deployment. The "strategy" determines how the deployment +// is carried out and may be changed at any time. The `latestVersion` field is updated when a new deployment +// is triggered by any means. +type DeploymentConfig struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` + + // Spec represents a desired deployment state and how to deploy to it. + Spec DeploymentConfigSpec `json:"spec" protobuf:"bytes,2,opt,name=spec"` + + // Status represents the current deployment state. + // +optional + Status DeploymentConfigStatus `json:"status,omitempty" protobuf:"bytes,3,opt,name=status"` +} + +// DeploymentConfigSpec represents the desired state of the deployment. +type DeploymentConfigSpec struct { + // Strategy describes how a deployment is executed. + // +optional + Strategy DeploymentStrategy `json:"strategy" protobuf:"bytes,1,opt,name=strategy"` + + // MinReadySeconds is the minimum number of seconds for which a newly created pod should + // be ready without any of its container crashing, for it to be considered available. + // Defaults to 0 (pod will be considered available as soon as it is ready) + MinReadySeconds int32 `json:"minReadySeconds,omitempty" protobuf:"varint,9,opt,name=minReadySeconds"` + + // Triggers determine how updates to a DeploymentConfig result in new deployments. If no triggers + // are defined, a new deployment can only occur as a result of an explicit client update to the + // DeploymentConfig with a new LatestVersion. If null, defaults to having a config change trigger.
+ // +optional + Triggers DeploymentTriggerPolicies `json:"triggers" protobuf:"bytes,2,rep,name=triggers"` + + // Replicas is the number of desired replicas. + // +optional + Replicas int32 `json:"replicas" protobuf:"varint,3,opt,name=replicas"` + + // RevisionHistoryLimit is the number of old ReplicationControllers to retain to allow for rollbacks. + // This field is a pointer to allow for differentiation between an explicit zero and not specified. + // Defaults to 10. (This only applies to DeploymentConfigs created via the new group API resource, not the legacy resource.) + RevisionHistoryLimit *int32 `json:"revisionHistoryLimit,omitempty" protobuf:"varint,4,opt,name=revisionHistoryLimit"` + + // Test ensures that this deployment config will have zero replicas except while a deployment is running. This allows the + // deployment config to be used as a continuous deployment test - triggering on images, running the deployment, and then succeeding + // or failing. Post strategy hooks and After actions can be used to integrate successful deployment with an action. + // +optional + Test bool `json:"test" protobuf:"varint,5,opt,name=test"` + + // Paused indicates that the deployment config is paused resulting in no new deployments on template + // changes or changes in the template caused by other triggers. + Paused bool `json:"paused,omitempty" protobuf:"varint,6,opt,name=paused"` + + // Selector is a label query over pods that should match the Replicas count. + Selector map[string]string `json:"selector,omitempty" protobuf:"bytes,7,rep,name=selector"` + + // Template is the object that describes the pod that will be created if + // insufficient replicas are detected. + Template *corev1.PodTemplateSpec `json:"template,omitempty" protobuf:"bytes,8,opt,name=template"` +} + +// DeploymentStrategy describes how to perform a deployment. +type DeploymentStrategy struct { + // Type is the name of a deployment strategy. + Type DeploymentStrategyType `json:"type,omitempty" protobuf:"bytes,1,opt,name=type,casttype=DeploymentStrategyType"` + + // CustomParams are the input to the Custom deployment strategy, and may also + // be specified for the Recreate and Rolling strategies to customize the execution + // process that runs the deployment. + CustomParams *CustomDeploymentStrategyParams `json:"customParams,omitempty" protobuf:"bytes,2,opt,name=customParams"` + // RecreateParams are the input to the Recreate deployment strategy. + RecreateParams *RecreateDeploymentStrategyParams `json:"recreateParams,omitempty" protobuf:"bytes,3,opt,name=recreateParams"` + // RollingParams are the input to the Rolling deployment strategy. + RollingParams *RollingDeploymentStrategyParams `json:"rollingParams,omitempty" protobuf:"bytes,4,opt,name=rollingParams"` + + // Resources contains resource requirements to execute the deployment and any hooks. + Resources corev1.ResourceRequirements `json:"resources,omitempty" protobuf:"bytes,5,opt,name=resources"` + // Labels is a set of key, value pairs added to custom deployer and lifecycle pre/post hook pods. + Labels map[string]string `json:"labels,omitempty" protobuf:"bytes,6,rep,name=labels"` + // Annotations is a set of key, value pairs added to custom deployer and lifecycle pre/post hook pods. 
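To see how these spec fields compose, here is a hedged sketch of constructing a minimal DeploymentConfig in Go; the names, image, and replica count are illustrative, and the strategy constant is defined just below:

package main

import (
	appsv1 "github.com/openshift/api/apps/v1"
	corev1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

func exampleDeploymentConfig() *appsv1.DeploymentConfig {
	labels := map[string]string{"app": "frontend"} // illustrative
	return &appsv1.DeploymentConfig{
		ObjectMeta: metav1.ObjectMeta{Name: "frontend", Namespace: "demo"},
		Spec: appsv1.DeploymentConfigSpec{
			Replicas: 3,
			Selector: labels,
			// Leaving Triggers nil defaults to a config change trigger,
			// per the field comment above.
			Strategy: appsv1.DeploymentStrategy{Type: appsv1.DeploymentStrategyTypeRolling},
			Template: &corev1.PodTemplateSpec{
				ObjectMeta: metav1.ObjectMeta{Labels: labels},
				Spec: corev1.PodSpec{
					Containers: []corev1.Container{{
						Name:  "frontend",
						Image: "image-registry.example/frontend:latest",
					}},
				},
			},
		},
	}
}

func main() { _ = exampleDeploymentConfig() }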
+ Annotations map[string]string `json:"annotations,omitempty" protobuf:"bytes,7,rep,name=annotations"` + + // ActiveDeadlineSeconds is the duration in seconds that the deployer pods for this deployment + // config may be active on a node before the system actively tries to terminate them. + ActiveDeadlineSeconds *int64 `json:"activeDeadlineSeconds,omitempty" protobuf:"varint,8,opt,name=activeDeadlineSeconds"` +} + +// DeploymentStrategyType refers to a specific DeploymentStrategy implementation. +type DeploymentStrategyType string + +const ( + // DeploymentStrategyTypeRecreate is a simple strategy suitable as a default. + DeploymentStrategyTypeRecreate DeploymentStrategyType = "Recreate" + // DeploymentStrategyTypeCustom is a user defined strategy. + DeploymentStrategyTypeCustom DeploymentStrategyType = "Custom" + // DeploymentStrategyTypeRolling uses the Kubernetes RollingUpdater. + DeploymentStrategyTypeRolling DeploymentStrategyType = "Rolling" +) + +// CustomDeploymentStrategyParams are the input to the Custom deployment strategy. +type CustomDeploymentStrategyParams struct { + // Image specifies a container image which can carry out a deployment. + Image string `json:"image,omitempty" protobuf:"bytes,1,opt,name=image"` + // Environment holds the environment which will be given to the container for Image. + Environment []corev1.EnvVar `json:"environment,omitempty" protobuf:"bytes,2,rep,name=environment"` + // Command is optional and overrides CMD in the container Image. + Command []string `json:"command,omitempty" protobuf:"bytes,3,rep,name=command"` +} + +// RecreateDeploymentStrategyParams are the input to the Recreate deployment +// strategy. +type RecreateDeploymentStrategyParams struct { + // TimeoutSeconds is the time to wait for updates before giving up. If the + // value is nil, a default will be used. + TimeoutSeconds *int64 `json:"timeoutSeconds,omitempty" protobuf:"varint,1,opt,name=timeoutSeconds"` + // Pre is a lifecycle hook which is executed before the strategy manipulates + // the deployment. All LifecycleHookFailurePolicy values are supported. + Pre *LifecycleHook `json:"pre,omitempty" protobuf:"bytes,2,opt,name=pre"` + // Mid is a lifecycle hook which is executed while the deployment is scaled down to zero before the first new + // pod is created. All LifecycleHookFailurePolicy values are supported. + Mid *LifecycleHook `json:"mid,omitempty" protobuf:"bytes,3,opt,name=mid"` + // Post is a lifecycle hook which is executed after the strategy has + // finished all deployment logic. All LifecycleHookFailurePolicy values are supported. + Post *LifecycleHook `json:"post,omitempty" protobuf:"bytes,4,opt,name=post"` +} + +// RollingDeploymentStrategyParams are the input to the Rolling deployment +// strategy. +type RollingDeploymentStrategyParams struct { + // UpdatePeriodSeconds is the time to wait between individual pod updates. + // If the value is nil, a default will be used. + UpdatePeriodSeconds *int64 `json:"updatePeriodSeconds,omitempty" protobuf:"varint,1,opt,name=updatePeriodSeconds"` + // IntervalSeconds is the time to wait between polling deployment status + // after update. If the value is nil, a default will be used. + IntervalSeconds *int64 `json:"intervalSeconds,omitempty" protobuf:"varint,2,opt,name=intervalSeconds"` + // TimeoutSeconds is the time to wait for updates before giving up. If the + // value is nil, a default will be used. 
+ TimeoutSeconds *int64 `json:"timeoutSeconds,omitempty" protobuf:"varint,3,opt,name=timeoutSeconds"` + // MaxUnavailable is the maximum number of pods that can be unavailable + // during the update. Value can be an absolute number (ex: 5) or a + // percentage of total pods at the start of update (ex: 10%). Absolute + // number is calculated from percentage by rounding down. + // + // This cannot be 0 if MaxSurge is 0. By default, 25% is used. + // + // Example: when this is set to 30%, the old RC can be scaled down by 30% + // immediately when the rolling update starts. Once new pods are ready, old + // RC can be scaled down further, followed by scaling up the new RC, + // ensuring that at least 70% of the original number of pods are available at + // all times during the update. + MaxUnavailable *intstr.IntOrString `json:"maxUnavailable,omitempty" protobuf:"bytes,4,opt,name=maxUnavailable"` + // MaxSurge is the maximum number of pods that can be scheduled above the + // original number of pods. Value can be an absolute number (ex: 5) or a + // percentage of total pods at the start of the update (ex: 10%). Absolute + // number is calculated from percentage by rounding up. + // + // This cannot be 0 if MaxUnavailable is 0. By default, 25% is used. + // + // Example: when this is set to 30%, the new RC can be scaled up by 30% + // immediately when the rolling update starts. Once old pods have been + // killed, new RC can be scaled up further, ensuring that total number of + // pods running at any time during the update is at most 130% of the original + // pods. + MaxSurge *intstr.IntOrString `json:"maxSurge,omitempty" protobuf:"bytes,5,opt,name=maxSurge"` + // Pre is a lifecycle hook which is executed before the deployment process + // begins. All LifecycleHookFailurePolicy values are supported. + Pre *LifecycleHook `json:"pre,omitempty" protobuf:"bytes,7,opt,name=pre"` + // Post is a lifecycle hook which is executed after the strategy has + // finished all deployment logic. All LifecycleHookFailurePolicy values + // are supported. + Post *LifecycleHook `json:"post,omitempty" protobuf:"bytes,8,opt,name=post"` +} + +// LifecycleHook defines a specific deployment lifecycle action. Only one type of action may be specified at any time. +type LifecycleHook struct { + // FailurePolicy specifies what action to take if the hook fails. + FailurePolicy LifecycleHookFailurePolicy `json:"failurePolicy" protobuf:"bytes,1,opt,name=failurePolicy,casttype=LifecycleHookFailurePolicy"` + + // ExecNewPod specifies the options for a lifecycle hook backed by a pod. + ExecNewPod *ExecNewPodHook `json:"execNewPod,omitempty" protobuf:"bytes,2,opt,name=execNewPod"` + + // TagImages instructs the deployer to tag the current image referenced under a container onto an image stream tag. + TagImages []TagImageHook `json:"tagImages,omitempty" protobuf:"bytes,3,rep,name=tagImages"` +} + +// LifecycleHookFailurePolicy describes possible actions to take if a hook fails. +type LifecycleHookFailurePolicy string + +const ( + // LifecycleHookFailurePolicyRetry means retry the hook until it succeeds. + LifecycleHookFailurePolicyRetry LifecycleHookFailurePolicy = "Retry" + // LifecycleHookFailurePolicyAbort means abort the deployment. + LifecycleHookFailurePolicyAbort LifecycleHookFailurePolicy = "Abort" + // LifecycleHookFailurePolicyIgnore means ignore failure and continue the deployment.
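To make the rounding rules above concrete, a small sketch using apimachinery's intstr helpers; GetValueFromIntOrPercent is assumed to be available in the vendored k8s.io/apimachinery/pkg/util/intstr of this vintage, and the 10-replica figure is illustrative:

package main

import (
	"fmt"

	appsv1 "github.com/openshift/api/apps/v1"
	"k8s.io/apimachinery/pkg/util/intstr"
)

func main() {
	maxUnavailable := intstr.FromString("25%")
	maxSurge := intstr.FromString("25%")
	params := appsv1.RollingDeploymentStrategyParams{
		MaxUnavailable: &maxUnavailable,
		MaxSurge:       &maxSurge,
	}

	// With 10 desired replicas: 25% rounds down to 2 for MaxUnavailable
	// (at least 8 pods stay available) and rounds up to 3 for MaxSurge
	// (at most 13 pods exist during the rollout).
	down, _ := intstr.GetValueFromIntOrPercent(params.MaxUnavailable, 10, false)
	up, _ := intstr.GetValueFromIntOrPercent(params.MaxSurge, 10, true)
	fmt.Println(down, up) // 2 3
}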
+ LifecycleHookFailurePolicyIgnore LifecycleHookFailurePolicy = "Ignore" +) + +// ExecNewPodHook is a hook implementation which runs a command in a new pod +// based on the specified container which is assumed to be part of the +// deployment template. +type ExecNewPodHook struct { + // Command is the action command and its arguments. + Command []string `json:"command" protobuf:"bytes,1,rep,name=command"` + // Env is a set of environment variables to supply to the hook pod's container. + Env []corev1.EnvVar `json:"env,omitempty" protobuf:"bytes,2,rep,name=env"` + // ContainerName is the name of a container in the deployment pod template + // whose container image will be used for the hook pod's container. + ContainerName string `json:"containerName" protobuf:"bytes,3,opt,name=containerName"` + // Volumes is a list of named volumes from the pod template which should be + // copied to the hook pod. Volume names not found in pod spec are ignored. + // An empty list means no volumes will be copied. + Volumes []string `json:"volumes,omitempty" protobuf:"bytes,4,rep,name=volumes"` +} + +// TagImageHook is a request to tag the image in a particular container onto an ImageStreamTag. +type TagImageHook struct { + // ContainerName is the name of a container in the deployment config whose image value will be used as the source of the tag. If there is only a single + // container this value will be defaulted to the name of that container. + ContainerName string `json:"containerName" protobuf:"bytes,1,opt,name=containerName"` + // To is the target ImageStreamTag to set the container's image onto. + To corev1.ObjectReference `json:"to" protobuf:"bytes,2,opt,name=to"` +} + +// DeploymentTriggerPolicies is a list of policies where nil values are different from empty arrays. +// +protobuf.nullable=true +// +protobuf.options.(gogoproto.goproto_stringer)=false +type DeploymentTriggerPolicies []DeploymentTriggerPolicy + +func (t DeploymentTriggerPolicies) String() string { + return fmt.Sprintf("%v", []DeploymentTriggerPolicy(t)) +} + +// DeploymentTriggerPolicy describes a policy for a single trigger that results in a new deployment. +type DeploymentTriggerPolicy struct { + // Type of the trigger + Type DeploymentTriggerType `json:"type,omitempty" protobuf:"bytes,1,opt,name=type,casttype=DeploymentTriggerType"` + // ImageChangeParams represents the parameters for the ImageChange trigger. + ImageChangeParams *DeploymentTriggerImageChangeParams `json:"imageChangeParams,omitempty" protobuf:"bytes,2,opt,name=imageChangeParams"` +} + +// DeploymentTriggerType refers to a specific DeploymentTriggerPolicy implementation. +type DeploymentTriggerType string + +const ( + // DeploymentTriggerOnImageChange will create new deployments in response to updated tags from + // a container image repository. + DeploymentTriggerOnImageChange DeploymentTriggerType = "ImageChange" + // DeploymentTriggerOnConfigChange will create new deployments in response to changes to + // the ControllerTemplate of a DeploymentConfig. + DeploymentTriggerOnConfigChange DeploymentTriggerType = "ConfigChange" +) + +// DeploymentTriggerImageChangeParams represents the parameters to the ImageChange trigger. +type DeploymentTriggerImageChangeParams struct { + // Automatic means that the detection of a new tag value should result in an image update + // inside the pod template.
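A hedged sketch of how the hook types combine: a Recreate strategy whose pre hook runs a migration command in a new pod and aborts the rollout on failure. The container name and command are illustrative:

package main

import appsv1 "github.com/openshift/api/apps/v1"

func examplePreHook() appsv1.RecreateDeploymentStrategyParams {
	return appsv1.RecreateDeploymentStrategyParams{
		Pre: &appsv1.LifecycleHook{
			// Abort the deployment if the migration fails.
			FailurePolicy: appsv1.LifecycleHookFailurePolicyAbort,
			ExecNewPod: &appsv1.ExecNewPodHook{
				// The hook pod reuses the image of this pod-template container.
				ContainerName: "frontend",
				Command:       []string{"/bin/sh", "-c", "./migrate-db.sh"},
			},
		},
	}
}

func main() { _ = examplePreHook() }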
+ Automatic bool `json:"automatic,omitempty" protobuf:"varint,1,opt,name=automatic"` + // ContainerNames is used to restrict tag updates to the specified set of container names in a pod. + // If multiple triggers point to the same containers, the resulting behavior is undefined. Future + // API versions will make this a validation error. If ContainerNames does not point to a valid container, + // the trigger will be ignored. Future API versions will make this a validation error. + ContainerNames []string `json:"containerNames,omitempty" protobuf:"bytes,2,rep,name=containerNames"` + // From is a reference to an image stream tag to watch for changes. From.Name is the only + // required subfield - if From.Namespace is blank, the namespace of the current deployment + // trigger will be used. + From corev1.ObjectReference `json:"from" protobuf:"bytes,3,opt,name=from"` + // LastTriggeredImage is the last image to be triggered. + LastTriggeredImage string `json:"lastTriggeredImage,omitempty" protobuf:"bytes,4,opt,name=lastTriggeredImage"` +} + +// DeploymentConfigStatus represents the current deployment state. +type DeploymentConfigStatus struct { + // LatestVersion is used to determine whether the current deployment associated with a deployment + // config is out of sync. + LatestVersion int64 `json:"latestVersion" protobuf:"varint,1,opt,name=latestVersion"` + // ObservedGeneration is the most recent generation observed by the deployment config controller. + ObservedGeneration int64 `json:"observedGeneration" protobuf:"varint,2,opt,name=observedGeneration"` + // Replicas is the total number of pods targeted by this deployment config. + Replicas int32 `json:"replicas" protobuf:"varint,3,opt,name=replicas"` + // UpdatedReplicas is the total number of non-terminated pods targeted by this deployment config + // that have the desired template spec. + UpdatedReplicas int32 `json:"updatedReplicas" protobuf:"varint,4,opt,name=updatedReplicas"` + // AvailableReplicas is the total number of available pods targeted by this deployment config. + AvailableReplicas int32 `json:"availableReplicas" protobuf:"varint,5,opt,name=availableReplicas"` + // UnavailableReplicas is the total number of unavailable pods targeted by this deployment config. + UnavailableReplicas int32 `json:"unavailableReplicas" protobuf:"varint,6,opt,name=unavailableReplicas"` + // Details are the reasons for the update to this deployment config. + // This could be based on a change made by the user or caused by an automatic trigger + Details *DeploymentDetails `json:"details,omitempty" protobuf:"bytes,7,opt,name=details"` + // Conditions represents the latest available observations of a deployment config's current state. + // +patchMergeKey=type + // +patchStrategy=merge + Conditions []DeploymentCondition `json:"conditions,omitempty" patchStrategy:"merge" patchMergeKey:"type" protobuf:"bytes,8,rep,name=conditions"` + // Total number of ready pods targeted by this deployment. + ReadyReplicas int32 `json:"readyReplicas,omitempty" protobuf:"varint,9,opt,name=readyReplicas"` +} + +// DeploymentDetails captures information about the causes of a deployment. 
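Tying the trigger types together, a hedged sketch of an ImageChange trigger policy that watches an image stream tag; the tag and container name are illustrative:

package main

import (
	appsv1 "github.com/openshift/api/apps/v1"
	corev1 "k8s.io/api/core/v1"
)

func exampleTrigger() appsv1.DeploymentTriggerPolicy {
	return appsv1.DeploymentTriggerPolicy{
		Type: appsv1.DeploymentTriggerOnImageChange,
		ImageChangeParams: &appsv1.DeploymentTriggerImageChangeParams{
			Automatic:      true, // roll out as soon as the tag moves
			ContainerNames: []string{"frontend"},
			// Per the comment above, From.Name is the only required
			// subfield; Kind is shown for clarity.
			From: corev1.ObjectReference{Kind: "ImageStreamTag", Name: "frontend:latest"},
		},
	}
}

func main() { _ = exampleTrigger() }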
+type DeploymentDetails struct { + // Message is the user specified change message, if this deployment was triggered manually by the user + Message string `json:"message,omitempty" protobuf:"bytes,1,opt,name=message"` + // Causes are extended data associated with all the causes for creating a new deployment + Causes []DeploymentCause `json:"causes" protobuf:"bytes,2,rep,name=causes"` +} + +// DeploymentCause captures information about a particular cause of a deployment. +type DeploymentCause struct { + // Type of the trigger that resulted in the creation of a new deployment + Type DeploymentTriggerType `json:"type" protobuf:"bytes,1,opt,name=type,casttype=DeploymentTriggerType"` + // ImageTrigger contains the image trigger details, if this trigger was fired based on an image change + ImageTrigger *DeploymentCauseImageTrigger `json:"imageTrigger,omitempty" protobuf:"bytes,2,opt,name=imageTrigger"` +} + +// DeploymentCauseImageTrigger represents details about the cause of a deployment originating +// from an image change trigger +type DeploymentCauseImageTrigger struct { + // From is a reference to the changed object which triggered a deployment. The field may have + // the kinds DockerImage, ImageStreamTag, or ImageStreamImage. + From corev1.ObjectReference `json:"from" protobuf:"bytes,1,opt,name=from"` +} + +type DeploymentConditionType string + +// These are valid conditions of a DeploymentConfig. +const ( + // DeploymentAvailable means the DeploymentConfig is available, i.e. at least the minimum available + // replicas required (dc.spec.replicas in case the DeploymentConfig is of Recreate type, + // dc.spec.replicas - dc.spec.strategy.rollingParams.maxUnavailable in case it's Rolling) are up and + // running for at least dc.spec.minReadySeconds. + DeploymentAvailable DeploymentConditionType = "Available" + // DeploymentProgressing is: + // * True: the DeploymentConfig has been successfully deployed or is in the middle of being deployed. + // The two different states can be determined by looking at the Reason of the Condition. + // For example, a complete DC will have {Status: True, Reason: NewReplicationControllerAvailable} + // and a DC in the middle of a rollout {Status: True, Reason: ReplicationControllerUpdated}. + // TODO: Represent a successfully deployed DC by using something else for Status like Unknown? + // * False: the DeploymentConfig has failed to deploy its latest version. + // + // This condition is purely informational and depends on the dc.spec.strategy.*params.timeoutSeconds + // field, which is responsible for the time in seconds to wait for a rollout before deciding that + // no progress can be made, thus the rollout is aborted. + // + // Progress for a DeploymentConfig is considered when new pods scale up or old pods scale down. + DeploymentProgressing DeploymentConditionType = "Progressing" + // DeploymentReplicaFailure is added in a deployment config when one of its pods + // fails to be created or deleted. + DeploymentReplicaFailure DeploymentConditionType = "ReplicaFailure" +) + +// DeploymentCondition describes the state of a deployment config at a certain point. +type DeploymentCondition struct { + // Type of deployment condition. + Type DeploymentConditionType `json:"type" protobuf:"bytes,1,opt,name=type,casttype=DeploymentConditionType"` + // Status of the condition, one of True, False, Unknown.
+ Status corev1.ConditionStatus `json:"status" protobuf:"bytes,2,opt,name=status,casttype=k8s.io/kubernetes/pkg/api/v1.ConditionStatus"` + // The last time this condition was updated. + LastUpdateTime metav1.Time `json:"lastUpdateTime,omitempty" protobuf:"bytes,6,opt,name=lastUpdateTime"` + // The last time the condition transitioned from one status to another. + LastTransitionTime metav1.Time `json:"lastTransitionTime,omitempty" protobuf:"bytes,3,opt,name=lastTransitionTime"` + // The reason for the condition's last transition. + Reason string `json:"reason,omitempty" protobuf:"bytes,4,opt,name=reason"` + // A human readable message indicating details about the transition. + Message string `json:"message,omitempty" protobuf:"bytes,5,opt,name=message"` +} + +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// DeploymentConfigList is a collection of deployment configs. +type DeploymentConfigList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` + + // Items is a list of deployment configs + Items []DeploymentConfig `json:"items" protobuf:"bytes,2,rep,name=items"` +} + +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// DeploymentConfigRollback provides the input to rollback generation. +type DeploymentConfigRollback struct { + metav1.TypeMeta `json:",inline"` + // Name of the deployment config that will be rolled back. + Name string `json:"name" protobuf:"bytes,1,opt,name=name"` + // UpdatedAnnotations is a set of new annotations that will be added in the deployment config. + UpdatedAnnotations map[string]string `json:"updatedAnnotations,omitempty" protobuf:"bytes,2,rep,name=updatedAnnotations"` + // Spec defines the options to rollback generation. + Spec DeploymentConfigRollbackSpec `json:"spec" protobuf:"bytes,3,opt,name=spec"` +} + +// DeploymentConfigRollbackSpec represents the options for rollback generation. +type DeploymentConfigRollbackSpec struct { + // From points to a ReplicationController which is a deployment. + From corev1.ObjectReference `json:"from" protobuf:"bytes,1,opt,name=from"` + // Revision to rollback to. If set to 0, rollback to the last revision. + Revision int64 `json:"revision,omitempty" protobuf:"varint,2,opt,name=revision"` + // IncludeTriggers specifies whether to include config Triggers. + IncludeTriggers bool `json:"includeTriggers" protobuf:"varint,3,opt,name=includeTriggers"` + // IncludeTemplate specifies whether to include the PodTemplateSpec. + IncludeTemplate bool `json:"includeTemplate" protobuf:"varint,4,opt,name=includeTemplate"` + // IncludeReplicationMeta specifies whether to include the replica count and selector. + IncludeReplicationMeta bool `json:"includeReplicationMeta" protobuf:"varint,5,opt,name=includeReplicationMeta"` + // IncludeStrategy specifies whether to include the deployment Strategy. + IncludeStrategy bool `json:"includeStrategy" protobuf:"varint,6,opt,name=includeStrategy"` +} + +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// DeploymentRequest is a request to a deployment config for a new deployment. +type DeploymentRequest struct { + metav1.TypeMeta `json:",inline"` + // Name of the deployment config for requesting a new deployment. + Name string `json:"name" protobuf:"bytes,1,opt,name=name"` + // Latest will update the deployment config with the latest state from all triggers. 
+ Latest bool `json:"latest" protobuf:"varint,2,opt,name=latest"` + // Force will try to force a new deployment to run. If the deployment config is paused, + // then setting this to true will return an Invalid error. + Force bool `json:"force" protobuf:"varint,3,opt,name=force"` + // ExcludeTriggers instructs the instantiator to avoid processing the specified triggers. + // This field overrides the triggers from latest and allows clients to control specific + // logic. This field is ignored if not specified. + ExcludeTriggers []DeploymentTriggerType `json:"excludeTriggers,omitempty" protobuf:"bytes,4,rep,name=excludeTriggers,casttype=DeploymentTriggerType"` +} + +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// DeploymentLog represents the logs for a deployment +type DeploymentLog struct { + metav1.TypeMeta `json:",inline"` +} + +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// DeploymentLogOptions is the REST options for a deployment log +type DeploymentLogOptions struct { + metav1.TypeMeta `json:",inline"` + + // The container for which to stream logs. Defaults to the only container if there is one container in the pod. + Container string `json:"container,omitempty" protobuf:"bytes,1,opt,name=container"` + // Follow if true indicates that the build log should be streamed until + // the build terminates. + Follow bool `json:"follow,omitempty" protobuf:"varint,2,opt,name=follow"` + // Return previous deployment logs. Defaults to false. + Previous bool `json:"previous,omitempty" protobuf:"varint,3,opt,name=previous"` + // A relative time in seconds before the current time from which to show logs. If this value + // precedes the time a pod was started, only logs since the pod start will be returned. + // If this value is in the future, no logs will be returned. + // Only one of sinceSeconds or sinceTime may be specified. + SinceSeconds *int64 `json:"sinceSeconds,omitempty" protobuf:"varint,4,opt,name=sinceSeconds"` + // An RFC3339 timestamp from which to show logs. If this value + // precedes the time a pod was started, only logs since the pod start will be returned. + // If this value is in the future, no logs will be returned. + // Only one of sinceSeconds or sinceTime may be specified. + SinceTime *metav1.Time `json:"sinceTime,omitempty" protobuf:"bytes,5,opt,name=sinceTime"` + // If true, add an RFC3339 or RFC3339Nano timestamp at the beginning of every line + // of log output. Defaults to false. + Timestamps bool `json:"timestamps,omitempty" protobuf:"varint,6,opt,name=timestamps"` + // If set, the number of lines from the end of the logs to show. If not specified, + // logs are shown from the creation of the container or sinceSeconds or sinceTime + TailLines *int64 `json:"tailLines,omitempty" protobuf:"varint,7,opt,name=tailLines"` + // If set, the number of bytes to read from the server before terminating the + // log output. This may not display a complete final line of logging, and may return + // slightly more or slightly less than the specified limit. + LimitBytes *int64 `json:"limitBytes,omitempty" protobuf:"varint,8,opt,name=limitBytes"` + + // NoWait if true causes the call to return immediately even if the deployment + // is not available yet. Otherwise the server will wait until the deployment has started. + // TODO: Fix the tag to 'noWait' in v2 + NoWait bool `json:"nowait,omitempty" protobuf:"varint,9,opt,name=nowait"` + + // Version of the deployment for which to view logs.
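The +genclient markers at the top of this file generate an Instantiate method that posts a DeploymentRequest to the instantiate subresource. A hedged sketch of the request body a client might send; the name is illustrative:

package main

import appsv1 "github.com/openshift/api/apps/v1"

func exampleRequest() *appsv1.DeploymentRequest {
	return &appsv1.DeploymentRequest{
		Name:   "frontend",
		Latest: true, // pull in the latest state from all triggers
		Force:  true, // returns an Invalid error if the config is paused
		// Skip image-change processing for this manual rollout.
		ExcludeTriggers: []appsv1.DeploymentTriggerType{appsv1.DeploymentTriggerOnImageChange},
	}
}

func main() { _ = exampleRequest() }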
+ Version *int64 `json:"version,omitempty" protobuf:"varint,10,opt,name=version"` +} diff --git a/vendor/github.com/openshift/api/apps/v1/zz_generated.deepcopy.go b/vendor/github.com/openshift/api/apps/v1/zz_generated.deepcopy.go new file mode 100644 index 0000000000000000000000000000000000000000..f6ab2fd48d3c0111b18d0b95378fb8aa0a3f5962 --- /dev/null +++ b/vendor/github.com/openshift/api/apps/v1/zz_generated.deepcopy.go @@ -0,0 +1,681 @@ +// +build !ignore_autogenerated + +// Code generated by deepcopy-gen. DO NOT EDIT. + +package v1 + +import ( + corev1 "k8s.io/api/core/v1" + runtime "k8s.io/apimachinery/pkg/runtime" + intstr "k8s.io/apimachinery/pkg/util/intstr" +) + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *CustomDeploymentStrategyParams) DeepCopyInto(out *CustomDeploymentStrategyParams) { + *out = *in + if in.Environment != nil { + in, out := &in.Environment, &out.Environment + *out = make([]corev1.EnvVar, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Command != nil { + in, out := &in.Command, &out.Command + *out = make([]string, len(*in)) + copy(*out, *in) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CustomDeploymentStrategyParams. +func (in *CustomDeploymentStrategyParams) DeepCopy() *CustomDeploymentStrategyParams { + if in == nil { + return nil + } + out := new(CustomDeploymentStrategyParams) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DeploymentCause) DeepCopyInto(out *DeploymentCause) { + *out = *in + if in.ImageTrigger != nil { + in, out := &in.ImageTrigger, &out.ImageTrigger + *out = new(DeploymentCauseImageTrigger) + **out = **in + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DeploymentCause. +func (in *DeploymentCause) DeepCopy() *DeploymentCause { + if in == nil { + return nil + } + out := new(DeploymentCause) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DeploymentCauseImageTrigger) DeepCopyInto(out *DeploymentCauseImageTrigger) { + *out = *in + out.From = in.From + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DeploymentCauseImageTrigger. +func (in *DeploymentCauseImageTrigger) DeepCopy() *DeploymentCauseImageTrigger { + if in == nil { + return nil + } + out := new(DeploymentCauseImageTrigger) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DeploymentCondition) DeepCopyInto(out *DeploymentCondition) { + *out = *in + in.LastUpdateTime.DeepCopyInto(&out.LastUpdateTime) + in.LastTransitionTime.DeepCopyInto(&out.LastTransitionTime) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DeploymentCondition. +func (in *DeploymentCondition) DeepCopy() *DeploymentCondition { + if in == nil { + return nil + } + out := new(DeploymentCondition) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *DeploymentConfig) DeepCopyInto(out *DeploymentConfig) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DeploymentConfig. +func (in *DeploymentConfig) DeepCopy() *DeploymentConfig { + if in == nil { + return nil + } + out := new(DeploymentConfig) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *DeploymentConfig) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DeploymentConfigList) DeepCopyInto(out *DeploymentConfigList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]DeploymentConfig, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DeploymentConfigList. +func (in *DeploymentConfigList) DeepCopy() *DeploymentConfigList { + if in == nil { + return nil + } + out := new(DeploymentConfigList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *DeploymentConfigList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DeploymentConfigRollback) DeepCopyInto(out *DeploymentConfigRollback) { + *out = *in + out.TypeMeta = in.TypeMeta + if in.UpdatedAnnotations != nil { + in, out := &in.UpdatedAnnotations, &out.UpdatedAnnotations + *out = make(map[string]string, len(*in)) + for key, val := range *in { + (*out)[key] = val + } + } + out.Spec = in.Spec + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DeploymentConfigRollback. +func (in *DeploymentConfigRollback) DeepCopy() *DeploymentConfigRollback { + if in == nil { + return nil + } + out := new(DeploymentConfigRollback) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *DeploymentConfigRollback) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DeploymentConfigRollbackSpec) DeepCopyInto(out *DeploymentConfigRollbackSpec) { + *out = *in + out.From = in.From + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DeploymentConfigRollbackSpec. +func (in *DeploymentConfigRollbackSpec) DeepCopy() *DeploymentConfigRollbackSpec { + if in == nil { + return nil + } + out := new(DeploymentConfigRollbackSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *DeploymentConfigSpec) DeepCopyInto(out *DeploymentConfigSpec) { + *out = *in + in.Strategy.DeepCopyInto(&out.Strategy) + if in.Triggers != nil { + in, out := &in.Triggers, &out.Triggers + *out = make(DeploymentTriggerPolicies, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.RevisionHistoryLimit != nil { + in, out := &in.RevisionHistoryLimit, &out.RevisionHistoryLimit + *out = new(int32) + **out = **in + } + if in.Selector != nil { + in, out := &in.Selector, &out.Selector + *out = make(map[string]string, len(*in)) + for key, val := range *in { + (*out)[key] = val + } + } + if in.Template != nil { + in, out := &in.Template, &out.Template + *out = new(corev1.PodTemplateSpec) + (*in).DeepCopyInto(*out) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DeploymentConfigSpec. +func (in *DeploymentConfigSpec) DeepCopy() *DeploymentConfigSpec { + if in == nil { + return nil + } + out := new(DeploymentConfigSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DeploymentConfigStatus) DeepCopyInto(out *DeploymentConfigStatus) { + *out = *in + if in.Details != nil { + in, out := &in.Details, &out.Details + *out = new(DeploymentDetails) + (*in).DeepCopyInto(*out) + } + if in.Conditions != nil { + in, out := &in.Conditions, &out.Conditions + *out = make([]DeploymentCondition, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DeploymentConfigStatus. +func (in *DeploymentConfigStatus) DeepCopy() *DeploymentConfigStatus { + if in == nil { + return nil + } + out := new(DeploymentConfigStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DeploymentDetails) DeepCopyInto(out *DeploymentDetails) { + *out = *in + if in.Causes != nil { + in, out := &in.Causes, &out.Causes + *out = make([]DeploymentCause, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DeploymentDetails. +func (in *DeploymentDetails) DeepCopy() *DeploymentDetails { + if in == nil { + return nil + } + out := new(DeploymentDetails) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DeploymentLog) DeepCopyInto(out *DeploymentLog) { + *out = *in + out.TypeMeta = in.TypeMeta + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DeploymentLog. +func (in *DeploymentLog) DeepCopy() *DeploymentLog { + if in == nil { + return nil + } + out := new(DeploymentLog) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *DeploymentLog) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *DeploymentLogOptions) DeepCopyInto(out *DeploymentLogOptions) { + *out = *in + out.TypeMeta = in.TypeMeta + if in.SinceSeconds != nil { + in, out := &in.SinceSeconds, &out.SinceSeconds + *out = new(int64) + **out = **in + } + if in.SinceTime != nil { + in, out := &in.SinceTime, &out.SinceTime + *out = (*in).DeepCopy() + } + if in.TailLines != nil { + in, out := &in.TailLines, &out.TailLines + *out = new(int64) + **out = **in + } + if in.LimitBytes != nil { + in, out := &in.LimitBytes, &out.LimitBytes + *out = new(int64) + **out = **in + } + if in.Version != nil { + in, out := &in.Version, &out.Version + *out = new(int64) + **out = **in + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DeploymentLogOptions. +func (in *DeploymentLogOptions) DeepCopy() *DeploymentLogOptions { + if in == nil { + return nil + } + out := new(DeploymentLogOptions) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *DeploymentLogOptions) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DeploymentRequest) DeepCopyInto(out *DeploymentRequest) { + *out = *in + out.TypeMeta = in.TypeMeta + if in.ExcludeTriggers != nil { + in, out := &in.ExcludeTriggers, &out.ExcludeTriggers + *out = make([]DeploymentTriggerType, len(*in)) + copy(*out, *in) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DeploymentRequest. +func (in *DeploymentRequest) DeepCopy() *DeploymentRequest { + if in == nil { + return nil + } + out := new(DeploymentRequest) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *DeploymentRequest) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DeploymentStrategy) DeepCopyInto(out *DeploymentStrategy) { + *out = *in + if in.CustomParams != nil { + in, out := &in.CustomParams, &out.CustomParams + *out = new(CustomDeploymentStrategyParams) + (*in).DeepCopyInto(*out) + } + if in.RecreateParams != nil { + in, out := &in.RecreateParams, &out.RecreateParams + *out = new(RecreateDeploymentStrategyParams) + (*in).DeepCopyInto(*out) + } + if in.RollingParams != nil { + in, out := &in.RollingParams, &out.RollingParams + *out = new(RollingDeploymentStrategyParams) + (*in).DeepCopyInto(*out) + } + in.Resources.DeepCopyInto(&out.Resources) + if in.Labels != nil { + in, out := &in.Labels, &out.Labels + *out = make(map[string]string, len(*in)) + for key, val := range *in { + (*out)[key] = val + } + } + if in.Annotations != nil { + in, out := &in.Annotations, &out.Annotations + *out = make(map[string]string, len(*in)) + for key, val := range *in { + (*out)[key] = val + } + } + if in.ActiveDeadlineSeconds != nil { + in, out := &in.ActiveDeadlineSeconds, &out.ActiveDeadlineSeconds + *out = new(int64) + **out = **in + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DeploymentStrategy. 
+func (in *DeploymentStrategy) DeepCopy() *DeploymentStrategy { + if in == nil { + return nil + } + out := new(DeploymentStrategy) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DeploymentTriggerImageChangeParams) DeepCopyInto(out *DeploymentTriggerImageChangeParams) { + *out = *in + if in.ContainerNames != nil { + in, out := &in.ContainerNames, &out.ContainerNames + *out = make([]string, len(*in)) + copy(*out, *in) + } + out.From = in.From + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DeploymentTriggerImageChangeParams. +func (in *DeploymentTriggerImageChangeParams) DeepCopy() *DeploymentTriggerImageChangeParams { + if in == nil { + return nil + } + out := new(DeploymentTriggerImageChangeParams) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in DeploymentTriggerPolicies) DeepCopyInto(out *DeploymentTriggerPolicies) { + { + in := &in + *out = make(DeploymentTriggerPolicies, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + return + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DeploymentTriggerPolicies. +func (in DeploymentTriggerPolicies) DeepCopy() DeploymentTriggerPolicies { + if in == nil { + return nil + } + out := new(DeploymentTriggerPolicies) + in.DeepCopyInto(out) + return *out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DeploymentTriggerPolicy) DeepCopyInto(out *DeploymentTriggerPolicy) { + *out = *in + if in.ImageChangeParams != nil { + in, out := &in.ImageChangeParams, &out.ImageChangeParams + *out = new(DeploymentTriggerImageChangeParams) + (*in).DeepCopyInto(*out) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DeploymentTriggerPolicy. +func (in *DeploymentTriggerPolicy) DeepCopy() *DeploymentTriggerPolicy { + if in == nil { + return nil + } + out := new(DeploymentTriggerPolicy) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ExecNewPodHook) DeepCopyInto(out *ExecNewPodHook) { + *out = *in + if in.Command != nil { + in, out := &in.Command, &out.Command + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.Env != nil { + in, out := &in.Env, &out.Env + *out = make([]corev1.EnvVar, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Volumes != nil { + in, out := &in.Volumes, &out.Volumes + *out = make([]string, len(*in)) + copy(*out, *in) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ExecNewPodHook. +func (in *ExecNewPodHook) DeepCopy() *ExecNewPodHook { + if in == nil { + return nil + } + out := new(ExecNewPodHook) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
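+// Illustrative note (not generator output): the generator picks the cheapest safe copy
+// per field. Slices whose element type holds no pointers, maps, or slices are duplicated
+// with the built-in copy (e.g. the []string fields in ExecNewPodHook above, and
+// []TagImageHook in LifecycleHook below), while slices of element types that do need
+// deep copies, such as []corev1.EnvVar, are copied element-wise via DeepCopyInto.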
+func (in *LifecycleHook) DeepCopyInto(out *LifecycleHook) { + *out = *in + if in.ExecNewPod != nil { + in, out := &in.ExecNewPod, &out.ExecNewPod + *out = new(ExecNewPodHook) + (*in).DeepCopyInto(*out) + } + if in.TagImages != nil { + in, out := &in.TagImages, &out.TagImages + *out = make([]TagImageHook, len(*in)) + copy(*out, *in) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LifecycleHook. +func (in *LifecycleHook) DeepCopy() *LifecycleHook { + if in == nil { + return nil + } + out := new(LifecycleHook) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *RecreateDeploymentStrategyParams) DeepCopyInto(out *RecreateDeploymentStrategyParams) { + *out = *in + if in.TimeoutSeconds != nil { + in, out := &in.TimeoutSeconds, &out.TimeoutSeconds + *out = new(int64) + **out = **in + } + if in.Pre != nil { + in, out := &in.Pre, &out.Pre + *out = new(LifecycleHook) + (*in).DeepCopyInto(*out) + } + if in.Mid != nil { + in, out := &in.Mid, &out.Mid + *out = new(LifecycleHook) + (*in).DeepCopyInto(*out) + } + if in.Post != nil { + in, out := &in.Post, &out.Post + *out = new(LifecycleHook) + (*in).DeepCopyInto(*out) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RecreateDeploymentStrategyParams. +func (in *RecreateDeploymentStrategyParams) DeepCopy() *RecreateDeploymentStrategyParams { + if in == nil { + return nil + } + out := new(RecreateDeploymentStrategyParams) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *RollingDeploymentStrategyParams) DeepCopyInto(out *RollingDeploymentStrategyParams) { + *out = *in + if in.UpdatePeriodSeconds != nil { + in, out := &in.UpdatePeriodSeconds, &out.UpdatePeriodSeconds + *out = new(int64) + **out = **in + } + if in.IntervalSeconds != nil { + in, out := &in.IntervalSeconds, &out.IntervalSeconds + *out = new(int64) + **out = **in + } + if in.TimeoutSeconds != nil { + in, out := &in.TimeoutSeconds, &out.TimeoutSeconds + *out = new(int64) + **out = **in + } + if in.MaxUnavailable != nil { + in, out := &in.MaxUnavailable, &out.MaxUnavailable + *out = new(intstr.IntOrString) + **out = **in + } + if in.MaxSurge != nil { + in, out := &in.MaxSurge, &out.MaxSurge + *out = new(intstr.IntOrString) + **out = **in + } + if in.Pre != nil { + in, out := &in.Pre, &out.Pre + *out = new(LifecycleHook) + (*in).DeepCopyInto(*out) + } + if in.Post != nil { + in, out := &in.Post, &out.Post + *out = new(LifecycleHook) + (*in).DeepCopyInto(*out) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RollingDeploymentStrategyParams. +func (in *RollingDeploymentStrategyParams) DeepCopy() *RollingDeploymentStrategyParams { + if in == nil { + return nil + } + out := new(RollingDeploymentStrategyParams) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *TagImageHook) DeepCopyInto(out *TagImageHook) { + *out = *in + out.To = in.To + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TagImageHook. 
+func (in *TagImageHook) DeepCopy() *TagImageHook { + if in == nil { + return nil + } + out := new(TagImageHook) + in.DeepCopyInto(out) + return out +} diff --git a/vendor/github.com/openshift/api/apps/v1/zz_generated.swagger_doc_generated.go b/vendor/github.com/openshift/api/apps/v1/zz_generated.swagger_doc_generated.go new file mode 100644 index 0000000000000000000000000000000000000000..9e3a07e8f969b447884702f4f9a132ea2d1a4df8 --- /dev/null +++ b/vendor/github.com/openshift/api/apps/v1/zz_generated.swagger_doc_generated.go @@ -0,0 +1,282 @@ +package v1 + +// This file contains a collection of methods that can be used from go-restful to +// generate Swagger API documentation for its models. Please read this PR for more +// information on the implementation: https://github.com/emicklei/go-restful/pull/215 +// +// TODOs are ignored from the parser (e.g. TODO(andronat):... || TODO:...) if and only if +// they are on one line! For multiple line or blocks that you want to ignore use ---. +// Any context after a --- is ignored. +// +// Those methods can be generated by using hack/update-swagger-docs.sh + +// AUTO-GENERATED FUNCTIONS START HERE +var map_CustomDeploymentStrategyParams = map[string]string{ + "": "CustomDeploymentStrategyParams are the input to the Custom deployment strategy.", + "image": "Image specifies a container image which can carry out a deployment.", + "environment": "Environment holds the environment which will be given to the container for Image.", + "command": "Command is optional and overrides CMD in the container Image.", +} + +func (CustomDeploymentStrategyParams) SwaggerDoc() map[string]string { + return map_CustomDeploymentStrategyParams +} + +var map_DeploymentCause = map[string]string{ + "": "DeploymentCause captures information about a particular cause of a deployment.", + "type": "Type of the trigger that resulted in the creation of a new deployment", + "imageTrigger": "ImageTrigger contains the image trigger details, if this trigger was fired based on an image change", +} + +func (DeploymentCause) SwaggerDoc() map[string]string { + return map_DeploymentCause +} + +var map_DeploymentCauseImageTrigger = map[string]string{ + "": "DeploymentCauseImageTrigger represents details about the cause of a deployment originating from an image change trigger", + "from": "From is a reference to the changed object which triggered a deployment. The field may have the kinds DockerImage, ImageStreamTag, or ImageStreamImage.", +} + +func (DeploymentCauseImageTrigger) SwaggerDoc() map[string]string { + return map_DeploymentCauseImageTrigger +} + +var map_DeploymentCondition = map[string]string{ + "": "DeploymentCondition describes the state of a deployment config at a certain point.", + "type": "Type of deployment condition.", + "status": "Status of the condition, one of True, False, Unknown.", + "lastUpdateTime": "The last time this condition was updated.", + "lastTransitionTime": "The last time the condition transitioned from one status to another.", + "reason": "The reason for the condition's last transition.", + "message": "A human readable message indicating details about the transition.", +} + +func (DeploymentCondition) SwaggerDoc() map[string]string { + return map_DeploymentCondition +} + +var map_DeploymentConfig = map[string]string{ + "": "Deployment Configs define the template for a pod and manage deploying new images or configuration changes. A single deployment configuration is usually analogous to a single micro-service.
It can support many different deployment patterns, including full restart, customizable rolling updates, and fully custom behaviors, as well as pre- and post-deployment hooks. Each individual deployment is represented as a replication controller.\n\nA deployment is \"triggered\" when its configuration is changed or a tag in an Image Stream is changed. Triggers can be disabled to allow manual control over a deployment. The \"strategy\" determines how the deployment is carried out and may be changed at any time. The `latestVersion` field is updated when a new deployment is triggered by any means.", + "spec": "Spec represents a desired deployment state and how to deploy to it.", + "status": "Status represents the current deployment state.", +} + +func (DeploymentConfig) SwaggerDoc() map[string]string { + return map_DeploymentConfig +} + +var map_DeploymentConfigList = map[string]string{ + "": "DeploymentConfigList is a collection of deployment configs.", + "items": "Items is a list of deployment configs", +} + +func (DeploymentConfigList) SwaggerDoc() map[string]string { + return map_DeploymentConfigList +} + +var map_DeploymentConfigRollback = map[string]string{ + "": "DeploymentConfigRollback provides the input to rollback generation.", + "name": "Name of the deployment config that will be rolled back.", + "updatedAnnotations": "UpdatedAnnotations is a set of new annotations that will be added in the deployment config.", + "spec": "Spec defines the options to rollback generation.", +} + +func (DeploymentConfigRollback) SwaggerDoc() map[string]string { + return map_DeploymentConfigRollback +} + +var map_DeploymentConfigRollbackSpec = map[string]string{ + "": "DeploymentConfigRollbackSpec represents the options for rollback generation.", + "from": "From points to a ReplicationController which is a deployment.", + "revision": "Revision to rollback to. If set to 0, rollback to the last revision.", + "includeTriggers": "IncludeTriggers specifies whether to include config Triggers.", + "includeTemplate": "IncludeTemplate specifies whether to include the PodTemplateSpec.", + "includeReplicationMeta": "IncludeReplicationMeta specifies whether to include the replica count and selector.", + "includeStrategy": "IncludeStrategy specifies whether to include the deployment Strategy.", +} + +func (DeploymentConfigRollbackSpec) SwaggerDoc() map[string]string { + return map_DeploymentConfigRollbackSpec +} + +var map_DeploymentConfigSpec = map[string]string{ + "": "DeploymentConfigSpec represents the desired state of the deployment.", + "strategy": "Strategy describes how a deployment is executed.", + "minReadySeconds": "MinReadySeconds is the minimum number of seconds for which a newly created pod should be ready without any of its containers crashing, for it to be considered available. Defaults to 0 (pod will be considered available as soon as it is ready)", + "triggers": "Triggers determine how updates to a DeploymentConfig result in new deployments. If no triggers are defined, a new deployment can only occur as a result of an explicit client update to the DeploymentConfig with a new LatestVersion. If null, defaults to having a config change trigger.", + "replicas": "Replicas is the number of desired replicas.", + "revisionHistoryLimit": "RevisionHistoryLimit is the number of old ReplicationControllers to retain to allow for rollbacks. This field is a pointer to allow for differentiation between an explicit zero and not specified. Defaults to 10.
(This only applies to DeploymentConfigs created via the new group API resource, not the legacy resource.)", + "test": "Test ensures that this deployment config will have zero replicas except while a deployment is running. This allows the deployment config to be used as a continuous deployment test - triggering on images, running the deployment, and then succeeding or failing. Post strategy hooks and After actions can be used to integrate successful deployment with an action.", + "paused": "Paused indicates that the deployment config is paused resulting in no new deployments on template changes or changes in the template caused by other triggers.", + "selector": "Selector is a label query over pods that should match the Replicas count.", + "template": "Template is the object that describes the pod that will be created if insufficient replicas are detected.", +} + +func (DeploymentConfigSpec) SwaggerDoc() map[string]string { + return map_DeploymentConfigSpec +} + +var map_DeploymentConfigStatus = map[string]string{ + "": "DeploymentConfigStatus represents the current deployment state.", + "latestVersion": "LatestVersion is used to determine whether the current deployment associated with a deployment config is out of sync.", + "observedGeneration": "ObservedGeneration is the most recent generation observed by the deployment config controller.", + "replicas": "Replicas is the total number of pods targeted by this deployment config.", + "updatedReplicas": "UpdatedReplicas is the total number of non-terminated pods targeted by this deployment config that have the desired template spec.", + "availableReplicas": "AvailableReplicas is the total number of available pods targeted by this deployment config.", + "unavailableReplicas": "UnavailableReplicas is the total number of unavailable pods targeted by this deployment config.", + "details": "Details are the reasons for the update to this deployment config. This could be based on a change made by the user or caused by an automatic trigger", + "conditions": "Conditions represents the latest available observations of a deployment config's current state.", + "readyReplicas": "Total number of ready pods targeted by this deployment.", +} + +func (DeploymentConfigStatus) SwaggerDoc() map[string]string { + return map_DeploymentConfigStatus +} + +var map_DeploymentDetails = map[string]string{ + "": "DeploymentDetails captures information about the causes of a deployment.", + "message": "Message is the user specified change message, if this deployment was triggered manually by the user", + "causes": "Causes are extended data associated with all the causes for creating a new deployment", +} + +func (DeploymentDetails) SwaggerDoc() map[string]string { + return map_DeploymentDetails +} + +var map_DeploymentLog = map[string]string{ + "": "DeploymentLog represents the logs for a deployment", +} + +func (DeploymentLog) SwaggerDoc() map[string]string { + return map_DeploymentLog +} + +var map_DeploymentLogOptions = map[string]string{ + "": "DeploymentLogOptions is the REST options for a deployment log", + "container": "The container for which to stream logs. Defaults to only container if there is one container in the pod.", + "follow": "Follow if true indicates that the deployment log should be streamed until the deployment terminates.", + "previous": "Return previous deployment logs. Defaults to false.", + "sinceSeconds": "A relative time in seconds before the current time from which to show logs.
If this value precedes the time a pod was started, only logs since the pod start will be returned. If this value is in the future, no logs will be returned. Only one of sinceSeconds or sinceTime may be specified.", + "sinceTime": "An RFC3339 timestamp from which to show logs. If this value precedes the time a pod was started, only logs since the pod start will be returned. If this value is in the future, no logs will be returned. Only one of sinceSeconds or sinceTime may be specified.", + "timestamps": "If true, add an RFC3339 or RFC3339Nano timestamp at the beginning of every line of log output. Defaults to false.", + "tailLines": "If set, the number of lines from the end of the logs to show. If not specified, logs are shown from the creation of the container or sinceSeconds or sinceTime", + "limitBytes": "If set, the number of bytes to read from the server before terminating the log output. This may not display a complete final line of logging, and may return slightly more or slightly less than the specified limit.", + "nowait": "NoWait if true causes the call to return immediately even if the deployment is not available yet. Otherwise the server will wait until the deployment has started.", + "version": "Version of the deployment for which to view logs.", +} + +func (DeploymentLogOptions) SwaggerDoc() map[string]string { + return map_DeploymentLogOptions +} + +var map_DeploymentRequest = map[string]string{ + "": "DeploymentRequest is a request to a deployment config for a new deployment.", + "name": "Name of the deployment config for requesting a new deployment.", + "latest": "Latest will update the deployment config with the latest state from all triggers.", + "force": "Force will try to force a new deployment to run. If the deployment config is paused, then setting this to true will return an Invalid error.", + "excludeTriggers": "ExcludeTriggers instructs the instantiator to avoid processing the specified triggers. This field overrides the triggers from latest and allows clients to control specific logic. 
This field is ignored if not specified.", +} + +func (DeploymentRequest) SwaggerDoc() map[string]string { + return map_DeploymentRequest +} + +var map_DeploymentStrategy = map[string]string{ + "": "DeploymentStrategy describes how to perform a deployment.", + "type": "Type is the name of a deployment strategy.", + "customParams": "CustomParams are the input to the Custom deployment strategy, and may also be specified for the Recreate and Rolling strategies to customize the execution process that runs the deployment.", + "recreateParams": "RecreateParams are the input to the Recreate deployment strategy.", + "rollingParams": "RollingParams are the input to the Rolling deployment strategy.", + "resources": "Resources contains resource requirements to execute the deployment and any hooks.", + "labels": "Labels is a set of key, value pairs added to custom deployer and lifecycle pre/post hook pods.", + "annotations": "Annotations is a set of key, value pairs added to custom deployer and lifecycle pre/post hook pods.", + "activeDeadlineSeconds": "ActiveDeadlineSeconds is the duration in seconds that the deployer pods for this deployment config may be active on a node before the system actively tries to terminate them.", +} + +func (DeploymentStrategy) SwaggerDoc() map[string]string { + return map_DeploymentStrategy +} + +var map_DeploymentTriggerImageChangeParams = map[string]string{ + "": "DeploymentTriggerImageChangeParams represents the parameters to the ImageChange trigger.", + "automatic": "Automatic means that the detection of a new tag value should result in an image update inside the pod template.", + "containerNames": "ContainerNames is used to restrict tag updates to the specified set of container names in a pod. If multiple triggers point to the same containers, the resulting behavior is undefined. Future API versions will make this a validation error. If ContainerNames does not point to a valid container, the trigger will be ignored. Future API versions will make this a validation error.", + "from": "From is a reference to an image stream tag to watch for changes. From.Name is the only required subfield - if From.Namespace is blank, the namespace of the current deployment trigger will be used.", + "lastTriggeredImage": "LastTriggeredImage is the last image to be triggered.", +} + +func (DeploymentTriggerImageChangeParams) SwaggerDoc() map[string]string { + return map_DeploymentTriggerImageChangeParams +} + +var map_DeploymentTriggerPolicy = map[string]string{ + "": "DeploymentTriggerPolicy describes a policy for a single trigger that results in a new deployment.", + "type": "Type of the trigger", + "imageChangeParams": "ImageChangeParams represents the parameters for the ImageChange trigger.", +} + +func (DeploymentTriggerPolicy) SwaggerDoc() map[string]string { + return map_DeploymentTriggerPolicy +} + +var map_ExecNewPodHook = map[string]string{ + "": "ExecNewPodHook is a hook implementation which runs a command in a new pod based on the specified container which is assumed to be part of the deployment template.", + "command": "Command is the action command and its arguments.", + "env": "Env is a set of environment variables to supply to the hook pod's container.", + "containerName": "ContainerName is the name of a container in the deployment pod template whose container image will be used for the hook pod's container.", + "volumes": "Volumes is a list of named volumes from the pod template which should be copied to the hook pod. Volume names not found in pod spec are ignored.
An empty list means no volumes will be copied.", +} + +func (ExecNewPodHook) SwaggerDoc() map[string]string { + return map_ExecNewPodHook +} + +var map_LifecycleHook = map[string]string{ + "": "LifecycleHook defines a specific deployment lifecycle action. Only one type of action may be specified at any time.", + "failurePolicy": "FailurePolicy specifies what action to take if the hook fails.", + "execNewPod": "ExecNewPod specifies the options for a lifecycle hook backed by a pod.", + "tagImages": "TagImages instructs the deployer to tag the current image referenced under a container onto an image stream tag.", +} + +func (LifecycleHook) SwaggerDoc() map[string]string { + return map_LifecycleHook +} + +var map_RecreateDeploymentStrategyParams = map[string]string{ + "": "RecreateDeploymentStrategyParams are the input to the Recreate deployment strategy.", + "timeoutSeconds": "TimeoutSeconds is the time to wait for updates before giving up. If the value is nil, a default will be used.", + "pre": "Pre is a lifecycle hook which is executed before the strategy manipulates the deployment. All LifecycleHookFailurePolicy values are supported.", + "mid": "Mid is a lifecycle hook which is executed while the deployment is scaled down to zero before the first new pod is created. All LifecycleHookFailurePolicy values are supported.", + "post": "Post is a lifecycle hook which is executed after the strategy has finished all deployment logic. All LifecycleHookFailurePolicy values are supported.", +} + +func (RecreateDeploymentStrategyParams) SwaggerDoc() map[string]string { + return map_RecreateDeploymentStrategyParams +} + +var map_RollingDeploymentStrategyParams = map[string]string{ + "": "RollingDeploymentStrategyParams are the input to the Rolling deployment strategy.", + "updatePeriodSeconds": "UpdatePeriodSeconds is the time to wait between individual pod updates. If the value is nil, a default will be used.", + "intervalSeconds": "IntervalSeconds is the time to wait between polling deployment status after update. If the value is nil, a default will be used.", + "timeoutSeconds": "TimeoutSeconds is the time to wait for updates before giving up. If the value is nil, a default will be used.", + "maxUnavailable": "MaxUnavailable is the maximum number of pods that can be unavailable during the update. Value can be an absolute number (ex: 5) or a percentage of total pods at the start of update (ex: 10%). Absolute number is calculated from percentage by rounding down.\n\nThis cannot be 0 if MaxSurge is 0. By default, 25% is used.\n\nExample: when this is set to 30%, the old RC can be scaled down by 30% immediately when the rolling update starts. Once new pods are ready, old RC can be scaled down further, followed by scaling up the new RC, ensuring that at least 70% of original number of pods are available at all times during the update.", + "maxSurge": "MaxSurge is the maximum number of pods that can be scheduled above the original number of pods. Value can be an absolute number (ex: 5) or a percentage of total pods at the start of the update (ex: 10%). Absolute number is calculated from percentage by rounding up.\n\nThis cannot be 0 if MaxUnavailable is 0. By default, 25% is used.\n\nExample: when this is set to 30%, the new RC can be scaled up by 30% immediately when the rolling update starts. 
Once old pods have been killed, new RC can be scaled up further, ensuring that total number of pods running at any time during the update is at most 130% of original pods.", + "pre": "Pre is a lifecycle hook which is executed before the deployment process begins. All LifecycleHookFailurePolicy values are supported.", + "post": "Post is a lifecycle hook which is executed after the strategy has finished all deployment logic. All LifecycleHookFailurePolicy values are supported.", +} + +func (RollingDeploymentStrategyParams) SwaggerDoc() map[string]string { + return map_RollingDeploymentStrategyParams +} + +var map_TagImageHook = map[string]string{ + "": "TagImageHook is a request to tag the image in a particular container onto an ImageStreamTag.", + "containerName": "ContainerName is the name of a container in the deployment config whose image value will be used as the source of the tag. If there is only a single container this value will be defaulted to the name of that container.", + "to": "To is the target ImageStreamTag to set the container's image onto.", +} + +func (TagImageHook) SwaggerDoc() map[string]string { + return map_TagImageHook +} + +// AUTO-GENERATED FUNCTIONS END HERE diff --git a/vendor/github.com/openshift/api/authorization/install.go b/vendor/github.com/openshift/api/authorization/install.go new file mode 100644 index 0000000000000000000000000000000000000000..08ecc95f49d78c233299a2c1c235cbc0c529ddd9 --- /dev/null +++ b/vendor/github.com/openshift/api/authorization/install.go @@ -0,0 +1,26 @@ +package authorization + +import ( + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" + + authorizationv1 "github.com/openshift/api/authorization/v1" +) + +const ( + GroupName = "authorization.openshift.io" +) + +var ( + schemeBuilder = runtime.NewSchemeBuilder(authorizationv1.Install) + // Install is a function which adds every version of this group to a scheme + Install = schemeBuilder.AddToScheme +) + +func Resource(resource string) schema.GroupResource { + return schema.GroupResource{Group: GroupName, Resource: resource} +} + +func Kind(kind string) schema.GroupKind { + return schema.GroupKind{Group: GroupName, Kind: kind} +} diff --git a/vendor/github.com/openshift/api/authorization/v1/0000_03_authorization-openshift_01_rolebindingrestriction.crd.yaml b/vendor/github.com/openshift/api/authorization/v1/0000_03_authorization-openshift_01_rolebindingrestriction.crd.yaml new file mode 100644 index 0000000000000000000000000000000000000000..cbbe69d2841e144a28b768ec549f9f3c4ec7e216 --- /dev/null +++ b/vendor/github.com/openshift/api/authorization/v1/0000_03_authorization-openshift_01_rolebindingrestriction.crd.yaml @@ -0,0 +1,205 @@ +apiVersion: apiextensions.k8s.io/v1beta1 +kind: CustomResourceDefinition +metadata: + name: rolebindingrestrictions.authorization.openshift.io +spec: + group: authorization.openshift.io + names: + kind: RoleBindingRestriction + listKind: RoleBindingRestrictionList + plural: rolebindingrestrictions + singular: rolebindingrestriction + scope: Namespaced + preserveUnknownFields: false + versions: + - name: v1 + served: true + storage: true + "validation": + "openAPIV3Schema": + description: RoleBindingRestriction is an object that can be matched against + a subject (user, group, or service account) to determine whether rolebindings + on that subject are allowed in the namespace to which the RoleBindingRestriction + belongs.
If any one of those RoleBindingRestriction objects matches a subject, + rolebindings on that subject in the namespace are allowed. + type: object + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation + of an object. Servers should convert recognized schemas to the latest + internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource this + object represents. Servers may infer this from the endpoint the client + submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + metadata: + type: object + spec: + description: Spec defines the matcher. + type: object + properties: + grouprestriction: + description: GroupRestriction matches against group subjects. + type: object + properties: + groups: + description: Groups is a list of groups used to match against an + individual user's groups. If the user is a member of one of the + whitelisted groups, the user is allowed to be bound to a role. + type: array + items: + type: string + nullable: true + labels: + description: Selectors specifies a list of label selectors over + group labels. + type: array + items: + description: A label selector is a label query over a set of resources. + The result of matchLabels and matchExpressions are ANDed. An + empty label selector matches all objects. A null label selector + matches no objects. + type: object + properties: + matchExpressions: + description: matchExpressions is a list of label selector + requirements. The requirements are ANDed. + type: array + items: + description: A label selector requirement is a selector + that contains values, a key, and an operator that relates + the key and values. + type: object + required: + - key + - operator + properties: + key: + description: key is the label key that the selector + applies to. + type: string + operator: + description: operator represents a key's relationship + to a set of values. Valid operators are In, NotIn, + Exists and DoesNotExist. + type: string + values: + description: values is an array of string values. If + the operator is In or NotIn, the values array must + be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced + during a strategic merge patch. + type: array + items: + type: string + matchLabels: + description: matchLabels is a map of {key,value} pairs. A + single {key,value} in the matchLabels map is equivalent + to an element of matchExpressions, whose key field is "key", + the operator is "In", and the values array contains only + "value". The requirements are ANDed. + type: object + additionalProperties: + type: string + nullable: true + nullable: true + serviceaccountrestriction: + description: ServiceAccountRestriction matches against service-account + subjects. + type: object + properties: + namespaces: + description: Namespaces specifies a list of literal namespace names. + type: array + items: + type: string + serviceaccounts: + description: ServiceAccounts specifies a list of literal service-account + names. + type: array + items: + description: ServiceAccountReference specifies a service account + and namespace by their names. 
+ type: object + properties: + name: + description: Name is the name of the service account. + type: string + namespace: + description: Namespace is the namespace of the service account. Service + accounts from inside the whitelisted namespaces are allowed + to be bound to roles. If Namespace is empty, then the namespace + of the RoleBindingRestriction in which the ServiceAccountReference + is embedded is used. + type: string + nullable: true + userrestriction: + description: UserRestriction matches against user subjects. + type: object + properties: + groups: + description: Groups specifies a list of literal group names. + type: array + items: + type: string + nullable: true + labels: + description: Selectors specifies a list of label selectors over + user labels. + type: array + items: + description: A label selector is a label query over a set of resources. + The result of matchLabels and matchExpressions are ANDed. An + empty label selector matches all objects. A null label selector + matches no objects. + type: object + properties: + matchExpressions: + description: matchExpressions is a list of label selector + requirements. The requirements are ANDed. + type: array + items: + description: A label selector requirement is a selector + that contains values, a key, and an operator that relates + the key and values. + type: object + required: + - key + - operator + properties: + key: + description: key is the label key that the selector + applies to. + type: string + operator: + description: operator represents a key's relationship + to a set of values. Valid operators are In, NotIn, + Exists and DoesNotExist. + type: string + values: + description: values is an array of string values. If + the operator is In or NotIn, the values array must + be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced + during a strategic merge patch. + type: array + items: + type: string + matchLabels: + description: matchLabels is a map of {key,value} pairs. A + single {key,value} in the matchLabels map is equivalent + to an element of matchExpressions, whose key field is "key", + the operator is "In", and the values array contains only + "value". The requirements are ANDed. + type: object + additionalProperties: + type: string + nullable: true + users: + description: Users specifies a list of literal user names. 
+ type: array + items: + type: string + nullable: true diff --git a/vendor/github.com/openshift/api/authorization/v1/codec.go b/vendor/github.com/openshift/api/authorization/v1/codec.go new file mode 100644 index 0000000000000000000000000000000000000000..61f1f9f514666d769ef2a20026c390378c226689 --- /dev/null +++ b/vendor/github.com/openshift/api/authorization/v1/codec.go @@ -0,0 +1,139 @@ +package v1 + +import ( + "github.com/openshift/api/pkg/serialization" + runtime "k8s.io/apimachinery/pkg/runtime" +) + +var _ runtime.NestedObjectDecoder = &PolicyRule{} +var _ runtime.NestedObjectEncoder = &PolicyRule{} + +func (c *PolicyRule) DecodeNestedObjects(d runtime.Decoder) error { + // decoding failures result in a runtime.Unknown object being created in Object and passed + // to conversion + serialization.DecodeNestedRawExtensionOrUnknown(d, &c.AttributeRestrictions) + return nil +} +func (c *PolicyRule) EncodeNestedObjects(e runtime.Encoder) error { + return serialization.EncodeNestedRawExtension(e, &c.AttributeRestrictions) +} + +var _ runtime.NestedObjectDecoder = &SelfSubjectRulesReview{} +var _ runtime.NestedObjectEncoder = &SelfSubjectRulesReview{} + +func (c *SelfSubjectRulesReview) DecodeNestedObjects(d runtime.Decoder) error { + // decoding failures result in a runtime.Unknown object being created in Object and passed + // to conversion + for i := range c.Status.Rules { + c.Status.Rules[i].DecodeNestedObjects(d) + } + return nil +} +func (c *SelfSubjectRulesReview) EncodeNestedObjects(e runtime.Encoder) error { + for i := range c.Status.Rules { + if err := c.Status.Rules[i].EncodeNestedObjects(e); err != nil { + return err + } + } + return nil +} + +var _ runtime.NestedObjectDecoder = &SubjectRulesReview{} +var _ runtime.NestedObjectEncoder = &SubjectRulesReview{} + +func (c *SubjectRulesReview) DecodeNestedObjects(d runtime.Decoder) error { + // decoding failures result in a runtime.Unknown object being created in Object and passed + // to conversion + for i := range c.Status.Rules { + c.Status.Rules[i].DecodeNestedObjects(d) + } + return nil +} +func (c *SubjectRulesReview) EncodeNestedObjects(e runtime.Encoder) error { + for i := range c.Status.Rules { + if err := c.Status.Rules[i].EncodeNestedObjects(e); err != nil { + return err + } + } + return nil +} + +var _ runtime.NestedObjectDecoder = &ClusterRole{} +var _ runtime.NestedObjectEncoder = &ClusterRole{} + +func (c *ClusterRole) DecodeNestedObjects(d runtime.Decoder) error { + // decoding failures result in a runtime.Unknown object being created in Object and passed + // to conversion + for i := range c.Rules { + c.Rules[i].DecodeNestedObjects(d) + } + return nil +} +func (c *ClusterRole) EncodeNestedObjects(e runtime.Encoder) error { + for i := range c.Rules { + if err := c.Rules[i].EncodeNestedObjects(e); err != nil { + return err + } + } + return nil +} + +var _ runtime.NestedObjectDecoder = &Role{} +var _ runtime.NestedObjectEncoder = &Role{} + +func (c *Role) DecodeNestedObjects(d runtime.Decoder) error { + // decoding failures result in a runtime.Unknown object being created in Object and passed + // to conversion + for i := range c.Rules { + c.Rules[i].DecodeNestedObjects(d) + } + return nil +} +func (c *Role) EncodeNestedObjects(e runtime.Encoder) error { + for i := range c.Rules { + if err := c.Rules[i].EncodeNestedObjects(e); err != nil { + return err + } + } + return nil +} + +var _ runtime.NestedObjectDecoder = &ClusterRoleList{} +var _ runtime.NestedObjectEncoder = &ClusterRoleList{} + +func (c *ClusterRoleList) 
DecodeNestedObjects(d runtime.Decoder) error { + // decoding failures result in a runtime.Unknown object being created in Object and passed + // to conversion + for i := range c.Items { + c.Items[i].DecodeNestedObjects(d) + } + return nil +} +func (c *ClusterRoleList) EncodeNestedObjects(e runtime.Encoder) error { + for i := range c.Items { + if err := c.Items[i].EncodeNestedObjects(e); err != nil { + return err + } + } + return nil +} + +var _ runtime.NestedObjectDecoder = &RoleList{} +var _ runtime.NestedObjectEncoder = &RoleList{} + +func (c *RoleList) DecodeNestedObjects(d runtime.Decoder) error { + // decoding failures result in a runtime.Unknown object being created in Object and passed + // to conversion + for i := range c.Items { + c.Items[i].DecodeNestedObjects(d) + } + return nil +} +func (c *RoleList) EncodeNestedObjects(e runtime.Encoder) error { + for i := range c.Items { + if err := c.Items[i].EncodeNestedObjects(e); err != nil { + return err + } + } + return nil +} diff --git a/vendor/github.com/openshift/api/authorization/v1/doc.go b/vendor/github.com/openshift/api/authorization/v1/doc.go new file mode 100644 index 0000000000000000000000000000000000000000..a66741dce6b5d765cb67b179f6cc6659482c5afe --- /dev/null +++ b/vendor/github.com/openshift/api/authorization/v1/doc.go @@ -0,0 +1,9 @@ +// +k8s:deepcopy-gen=package,register +// +k8s:conversion-gen=github.com/openshift/origin/pkg/authorization/apis/authorization +// +k8s:defaulter-gen=TypeMeta +// +k8s:openapi-gen=true + +// +kubebuilder:validation:Optional +// +groupName=authorization.openshift.io +// Package v1 is the v1 version of the API. +package v1 diff --git a/vendor/github.com/openshift/api/authorization/v1/generated.pb.go b/vendor/github.com/openshift/api/authorization/v1/generated.pb.go new file mode 100644 index 0000000000000000000000000000000000000000..b18c5cfff04e2766471770e5d3c3570710f61eb4 --- /dev/null +++ b/vendor/github.com/openshift/api/authorization/v1/generated.pb.go @@ -0,0 +1,8943 @@ +// Code generated by protoc-gen-gogo. DO NOT EDIT. +// source: github.com/openshift/api/authorization/v1/generated.proto + +package v1 + +import ( + fmt "fmt" + + io "io" + + proto "github.com/gogo/protobuf/proto" + v12 "k8s.io/api/core/v1" + v11 "k8s.io/api/rbac/v1" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + + math "math" + math_bits "math/bits" + reflect "reflect" + strings "strings" +) + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. 
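+// Illustrative note: the blank-identifier const below is the standard gogo/protobuf
+// version guard. proto.GoGoProtoPackageIsVersion2 is only declared in sufficiently
+// recent releases of github.com/gogo/protobuf, so building this generated file against
+// an older copy of the proto package fails right here, where the comment above explains
+// the remedy, instead of failing with obscure errors elsewhere.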
+const _ = proto.GoGoProtoPackageIsVersion2 // please upgrade the proto package + +func (m *Action) Reset() { *m = Action{} } +func (*Action) ProtoMessage() {} +func (*Action) Descriptor() ([]byte, []int) { + return fileDescriptor_39b89822f939ca46, []int{0} +} +func (m *Action) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *Action) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *Action) XXX_Merge(src proto.Message) { + xxx_messageInfo_Action.Merge(m, src) +} +func (m *Action) XXX_Size() int { + return m.Size() +} +func (m *Action) XXX_DiscardUnknown() { + xxx_messageInfo_Action.DiscardUnknown(m) +} + +var xxx_messageInfo_Action proto.InternalMessageInfo + +func (m *ClusterRole) Reset() { *m = ClusterRole{} } +func (*ClusterRole) ProtoMessage() {} +func (*ClusterRole) Descriptor() ([]byte, []int) { + return fileDescriptor_39b89822f939ca46, []int{1} +} +func (m *ClusterRole) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ClusterRole) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *ClusterRole) XXX_Merge(src proto.Message) { + xxx_messageInfo_ClusterRole.Merge(m, src) +} +func (m *ClusterRole) XXX_Size() int { + return m.Size() +} +func (m *ClusterRole) XXX_DiscardUnknown() { + xxx_messageInfo_ClusterRole.DiscardUnknown(m) +} + +var xxx_messageInfo_ClusterRole proto.InternalMessageInfo + +func (m *ClusterRoleBinding) Reset() { *m = ClusterRoleBinding{} } +func (*ClusterRoleBinding) ProtoMessage() {} +func (*ClusterRoleBinding) Descriptor() ([]byte, []int) { + return fileDescriptor_39b89822f939ca46, []int{2} +} +func (m *ClusterRoleBinding) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ClusterRoleBinding) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *ClusterRoleBinding) XXX_Merge(src proto.Message) { + xxx_messageInfo_ClusterRoleBinding.Merge(m, src) +} +func (m *ClusterRoleBinding) XXX_Size() int { + return m.Size() +} +func (m *ClusterRoleBinding) XXX_DiscardUnknown() { + xxx_messageInfo_ClusterRoleBinding.DiscardUnknown(m) +} + +var xxx_messageInfo_ClusterRoleBinding proto.InternalMessageInfo + +func (m *ClusterRoleBindingList) Reset() { *m = ClusterRoleBindingList{} } +func (*ClusterRoleBindingList) ProtoMessage() {} +func (*ClusterRoleBindingList) Descriptor() ([]byte, []int) { + return fileDescriptor_39b89822f939ca46, []int{3} +} +func (m *ClusterRoleBindingList) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ClusterRoleBindingList) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *ClusterRoleBindingList) XXX_Merge(src proto.Message) { + xxx_messageInfo_ClusterRoleBindingList.Merge(m, src) +} +func (m *ClusterRoleBindingList) XXX_Size() int { + return m.Size() +} +func (m *ClusterRoleBindingList) XXX_DiscardUnknown() { + xxx_messageInfo_ClusterRoleBindingList.DiscardUnknown(m) +} + +var xxx_messageInfo_ClusterRoleBindingList proto.InternalMessageInfo + +func (m *ClusterRoleList) Reset() { *m = ClusterRoleList{} } +func (*ClusterRoleList) 
ProtoMessage() {} +func (*ClusterRoleList) Descriptor() ([]byte, []int) { + return fileDescriptor_39b89822f939ca46, []int{4} +} +func (m *ClusterRoleList) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ClusterRoleList) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *ClusterRoleList) XXX_Merge(src proto.Message) { + xxx_messageInfo_ClusterRoleList.Merge(m, src) +} +func (m *ClusterRoleList) XXX_Size() int { + return m.Size() +} +func (m *ClusterRoleList) XXX_DiscardUnknown() { + xxx_messageInfo_ClusterRoleList.DiscardUnknown(m) +} + +var xxx_messageInfo_ClusterRoleList proto.InternalMessageInfo + +func (m *GroupRestriction) Reset() { *m = GroupRestriction{} } +func (*GroupRestriction) ProtoMessage() {} +func (*GroupRestriction) Descriptor() ([]byte, []int) { + return fileDescriptor_39b89822f939ca46, []int{5} +} +func (m *GroupRestriction) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *GroupRestriction) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *GroupRestriction) XXX_Merge(src proto.Message) { + xxx_messageInfo_GroupRestriction.Merge(m, src) +} +func (m *GroupRestriction) XXX_Size() int { + return m.Size() +} +func (m *GroupRestriction) XXX_DiscardUnknown() { + xxx_messageInfo_GroupRestriction.DiscardUnknown(m) +} + +var xxx_messageInfo_GroupRestriction proto.InternalMessageInfo + +func (m *IsPersonalSubjectAccessReview) Reset() { *m = IsPersonalSubjectAccessReview{} } +func (*IsPersonalSubjectAccessReview) ProtoMessage() {} +func (*IsPersonalSubjectAccessReview) Descriptor() ([]byte, []int) { + return fileDescriptor_39b89822f939ca46, []int{6} +} +func (m *IsPersonalSubjectAccessReview) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *IsPersonalSubjectAccessReview) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *IsPersonalSubjectAccessReview) XXX_Merge(src proto.Message) { + xxx_messageInfo_IsPersonalSubjectAccessReview.Merge(m, src) +} +func (m *IsPersonalSubjectAccessReview) XXX_Size() int { + return m.Size() +} +func (m *IsPersonalSubjectAccessReview) XXX_DiscardUnknown() { + xxx_messageInfo_IsPersonalSubjectAccessReview.DiscardUnknown(m) +} + +var xxx_messageInfo_IsPersonalSubjectAccessReview proto.InternalMessageInfo + +func (m *LocalResourceAccessReview) Reset() { *m = LocalResourceAccessReview{} } +func (*LocalResourceAccessReview) ProtoMessage() {} +func (*LocalResourceAccessReview) Descriptor() ([]byte, []int) { + return fileDescriptor_39b89822f939ca46, []int{7} +} +func (m *LocalResourceAccessReview) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *LocalResourceAccessReview) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *LocalResourceAccessReview) XXX_Merge(src proto.Message) { + xxx_messageInfo_LocalResourceAccessReview.Merge(m, src) +} +func (m *LocalResourceAccessReview) XXX_Size() int { + return m.Size() +} +func (m *LocalResourceAccessReview) XXX_DiscardUnknown() { + xxx_messageInfo_LocalResourceAccessReview.DiscardUnknown(m) +} + +var 
xxx_messageInfo_LocalResourceAccessReview proto.InternalMessageInfo
+
+func (m *LocalSubjectAccessReview) Reset() { *m = LocalSubjectAccessReview{} }
+func (*LocalSubjectAccessReview) ProtoMessage() {}
+func (*LocalSubjectAccessReview) Descriptor() ([]byte, []int) {
+	return fileDescriptor_39b89822f939ca46, []int{8}
+}
+func (m *LocalSubjectAccessReview) XXX_Unmarshal(b []byte) error {
+	return m.Unmarshal(b)
+}
+func (m *LocalSubjectAccessReview) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	b = b[:cap(b)]
+	n, err := m.MarshalToSizedBuffer(b)
+	if err != nil {
+		return nil, err
+	}
+	return b[:n], nil
+}
+func (m *LocalSubjectAccessReview) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_LocalSubjectAccessReview.Merge(m, src)
+}
+func (m *LocalSubjectAccessReview) XXX_Size() int {
+	return m.Size()
+}
+func (m *LocalSubjectAccessReview) XXX_DiscardUnknown() {
+	xxx_messageInfo_LocalSubjectAccessReview.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_LocalSubjectAccessReview proto.InternalMessageInfo
+
+func (m *NamedClusterRole) Reset() { *m = NamedClusterRole{} }
+func (*NamedClusterRole) ProtoMessage() {}
+func (*NamedClusterRole) Descriptor() ([]byte, []int) {
+	return fileDescriptor_39b89822f939ca46, []int{9}
+}
+func (m *NamedClusterRole) XXX_Unmarshal(b []byte) error {
+	return m.Unmarshal(b)
+}
+func (m *NamedClusterRole) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	b = b[:cap(b)]
+	n, err := m.MarshalToSizedBuffer(b)
+	if err != nil {
+		return nil, err
+	}
+	return b[:n], nil
+}
+func (m *NamedClusterRole) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_NamedClusterRole.Merge(m, src)
+}
+func (m *NamedClusterRole) XXX_Size() int {
+	return m.Size()
+}
+func (m *NamedClusterRole) XXX_DiscardUnknown() {
+	xxx_messageInfo_NamedClusterRole.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_NamedClusterRole proto.InternalMessageInfo
+
+func (m *NamedClusterRoleBinding) Reset() { *m = NamedClusterRoleBinding{} }
+func (*NamedClusterRoleBinding) ProtoMessage() {}
+func (*NamedClusterRoleBinding) Descriptor() ([]byte, []int) {
+	return fileDescriptor_39b89822f939ca46, []int{10}
+}
+func (m *NamedClusterRoleBinding) XXX_Unmarshal(b []byte) error {
+	return m.Unmarshal(b)
+}
+func (m *NamedClusterRoleBinding) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	b = b[:cap(b)]
+	n, err := m.MarshalToSizedBuffer(b)
+	if err != nil {
+		return nil, err
+	}
+	return b[:n], nil
+}
+func (m *NamedClusterRoleBinding) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_NamedClusterRoleBinding.Merge(m, src)
+}
+func (m *NamedClusterRoleBinding) XXX_Size() int {
+	return m.Size()
+}
+func (m *NamedClusterRoleBinding) XXX_DiscardUnknown() {
+	xxx_messageInfo_NamedClusterRoleBinding.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_NamedClusterRoleBinding proto.InternalMessageInfo
+
+func (m *NamedRole) Reset() { *m = NamedRole{} }
+func (*NamedRole) ProtoMessage() {}
+func (*NamedRole) Descriptor() ([]byte, []int) {
+	return fileDescriptor_39b89822f939ca46, []int{11}
+}
+func (m *NamedRole) XXX_Unmarshal(b []byte) error {
+	return m.Unmarshal(b)
+}
+func (m *NamedRole) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	b = b[:cap(b)]
+	n, err := m.MarshalToSizedBuffer(b)
+	if err != nil {
+		return nil, err
+	}
+	return b[:n], nil
+}
+func (m *NamedRole) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_NamedRole.Merge(m, src)
+}
+func (m *NamedRole) XXX_Size() int {
+	return m.Size()
+}
+func (m *NamedRole) XXX_DiscardUnknown() {
+	xxx_messageInfo_NamedRole.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_NamedRole proto.InternalMessageInfo
+
+func (m *NamedRoleBinding) Reset() { *m = NamedRoleBinding{} }
+func (*NamedRoleBinding) ProtoMessage() {}
+func (*NamedRoleBinding) Descriptor() ([]byte, []int) {
+	return fileDescriptor_39b89822f939ca46, []int{12}
+}
+func (m *NamedRoleBinding) XXX_Unmarshal(b []byte) error {
+	return m.Unmarshal(b)
+}
+func (m *NamedRoleBinding) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	b = b[:cap(b)]
+	n, err := m.MarshalToSizedBuffer(b)
+	if err != nil {
+		return nil, err
+	}
+	return b[:n], nil
+}
+func (m *NamedRoleBinding) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_NamedRoleBinding.Merge(m, src)
+}
+func (m *NamedRoleBinding) XXX_Size() int {
+	return m.Size()
+}
+func (m *NamedRoleBinding) XXX_DiscardUnknown() {
+	xxx_messageInfo_NamedRoleBinding.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_NamedRoleBinding proto.InternalMessageInfo
+
+func (m *OptionalNames) Reset() { *m = OptionalNames{} }
+func (*OptionalNames) ProtoMessage() {}
+func (*OptionalNames) Descriptor() ([]byte, []int) {
+	return fileDescriptor_39b89822f939ca46, []int{13}
+}
+func (m *OptionalNames) XXX_Unmarshal(b []byte) error {
+	return m.Unmarshal(b)
+}
+func (m *OptionalNames) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	b = b[:cap(b)]
+	n, err := m.MarshalToSizedBuffer(b)
+	if err != nil {
+		return nil, err
+	}
+	return b[:n], nil
+}
+func (m *OptionalNames) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_OptionalNames.Merge(m, src)
+}
+func (m *OptionalNames) XXX_Size() int {
+	return m.Size()
+}
+func (m *OptionalNames) XXX_DiscardUnknown() {
+	xxx_messageInfo_OptionalNames.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_OptionalNames proto.InternalMessageInfo
+
+func (m *OptionalScopes) Reset() { *m = OptionalScopes{} }
+func (*OptionalScopes) ProtoMessage() {}
+func (*OptionalScopes) Descriptor() ([]byte, []int) {
+	return fileDescriptor_39b89822f939ca46, []int{14}
+}
+func (m *OptionalScopes) XXX_Unmarshal(b []byte) error {
+	return m.Unmarshal(b)
+}
+func (m *OptionalScopes) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	b = b[:cap(b)]
+	n, err := m.MarshalToSizedBuffer(b)
+	if err != nil {
+		return nil, err
+	}
+	return b[:n], nil
+}
+func (m *OptionalScopes) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_OptionalScopes.Merge(m, src)
+}
+func (m *OptionalScopes) XXX_Size() int {
+	return m.Size()
+}
+func (m *OptionalScopes) XXX_DiscardUnknown() {
+	xxx_messageInfo_OptionalScopes.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_OptionalScopes proto.InternalMessageInfo
+
+func (m *PolicyRule) Reset() { *m = PolicyRule{} }
+func (*PolicyRule) ProtoMessage() {}
+func (*PolicyRule) Descriptor() ([]byte, []int) {
+	return fileDescriptor_39b89822f939ca46, []int{15}
+}
+func (m *PolicyRule) XXX_Unmarshal(b []byte) error {
+	return m.Unmarshal(b)
+}
+func (m *PolicyRule) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	b = b[:cap(b)]
+	n, err := m.MarshalToSizedBuffer(b)
+	if err != nil {
+		return nil, err
+	}
+	return b[:n], nil
+}
+func (m *PolicyRule) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_PolicyRule.Merge(m, src)
+}
+func (m *PolicyRule) XXX_Size() int {
+	return m.Size()
+}
+func (m *PolicyRule) XXX_DiscardUnknown() {
+	xxx_messageInfo_PolicyRule.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_PolicyRule proto.InternalMessageInfo
+
+func (m *ResourceAccessReview) Reset() { *m = ResourceAccessReview{} }
+func (*ResourceAccessReview) ProtoMessage() {}
+func (*ResourceAccessReview) Descriptor() ([]byte, []int) {
+	return fileDescriptor_39b89822f939ca46, []int{16}
+}
+func (m *ResourceAccessReview) XXX_Unmarshal(b []byte) error {
+	return m.Unmarshal(b)
+}
+func (m *ResourceAccessReview) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	b = b[:cap(b)]
+	n, err := m.MarshalToSizedBuffer(b)
+	if err != nil {
+		return nil, err
+	}
+	return b[:n], nil
+}
+func (m *ResourceAccessReview) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_ResourceAccessReview.Merge(m, src)
+}
+func (m *ResourceAccessReview) XXX_Size() int {
+	return m.Size()
+}
+func (m *ResourceAccessReview) XXX_DiscardUnknown() {
+	xxx_messageInfo_ResourceAccessReview.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_ResourceAccessReview proto.InternalMessageInfo
+
+func (m *ResourceAccessReviewResponse) Reset() { *m = ResourceAccessReviewResponse{} }
+func (*ResourceAccessReviewResponse) ProtoMessage() {}
+func (*ResourceAccessReviewResponse) Descriptor() ([]byte, []int) {
+	return fileDescriptor_39b89822f939ca46, []int{17}
+}
+func (m *ResourceAccessReviewResponse) XXX_Unmarshal(b []byte) error {
+	return m.Unmarshal(b)
+}
+func (m *ResourceAccessReviewResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	b = b[:cap(b)]
+	n, err := m.MarshalToSizedBuffer(b)
+	if err != nil {
+		return nil, err
+	}
+	return b[:n], nil
+}
+func (m *ResourceAccessReviewResponse) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_ResourceAccessReviewResponse.Merge(m, src)
+}
+func (m *ResourceAccessReviewResponse) XXX_Size() int {
+	return m.Size()
+}
+func (m *ResourceAccessReviewResponse) XXX_DiscardUnknown() {
+	xxx_messageInfo_ResourceAccessReviewResponse.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_ResourceAccessReviewResponse proto.InternalMessageInfo
+
+func (m *Role) Reset() { *m = Role{} }
+func (*Role) ProtoMessage() {}
+func (*Role) Descriptor() ([]byte, []int) {
+	return fileDescriptor_39b89822f939ca46, []int{18}
+}
+func (m *Role) XXX_Unmarshal(b []byte) error {
+	return m.Unmarshal(b)
+}
+func (m *Role) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	b = b[:cap(b)]
+	n, err := m.MarshalToSizedBuffer(b)
+	if err != nil {
+		return nil, err
+	}
+	return b[:n], nil
+}
+func (m *Role) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_Role.Merge(m, src)
+}
+func (m *Role) XXX_Size() int {
+	return m.Size()
+}
+func (m *Role) XXX_DiscardUnknown() {
+	xxx_messageInfo_Role.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_Role proto.InternalMessageInfo
+
+func (m *RoleBinding) Reset() { *m = RoleBinding{} }
+func (*RoleBinding) ProtoMessage() {}
+func (*RoleBinding) Descriptor() ([]byte, []int) {
+	return fileDescriptor_39b89822f939ca46, []int{19}
+}
+func (m *RoleBinding) XXX_Unmarshal(b []byte) error {
+	return m.Unmarshal(b)
+}
+func (m *RoleBinding) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	b = b[:cap(b)]
+	n, err := m.MarshalToSizedBuffer(b)
+	if err != nil {
+		return nil, err
+	}
+	return b[:n], nil
+}
+func (m *RoleBinding) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_RoleBinding.Merge(m, src)
+}
+func (m *RoleBinding) XXX_Size() int {
+	return m.Size()
+}
+func (m *RoleBinding) XXX_DiscardUnknown() {
+	xxx_messageInfo_RoleBinding.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_RoleBinding proto.InternalMessageInfo
+
+func (m *RoleBindingList) Reset() { *m = RoleBindingList{} }
+func (*RoleBindingList) ProtoMessage() {}
+func (*RoleBindingList) Descriptor() ([]byte, []int) {
+	return fileDescriptor_39b89822f939ca46, []int{20}
+}
+func (m *RoleBindingList) XXX_Unmarshal(b []byte) error {
+	return m.Unmarshal(b)
+}
+func (m *RoleBindingList) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	b = b[:cap(b)]
+	n, err := m.MarshalToSizedBuffer(b)
+	if err != nil {
+		return nil, err
+	}
+	return b[:n], nil
+}
+func (m *RoleBindingList) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_RoleBindingList.Merge(m, src)
+}
+func (m *RoleBindingList) XXX_Size() int {
+	return m.Size()
+}
+func (m *RoleBindingList) XXX_DiscardUnknown() {
+	xxx_messageInfo_RoleBindingList.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_RoleBindingList proto.InternalMessageInfo
+
+func (m *RoleBindingRestriction) Reset() { *m = RoleBindingRestriction{} }
+func (*RoleBindingRestriction) ProtoMessage() {}
+func (*RoleBindingRestriction) Descriptor() ([]byte, []int) {
+	return fileDescriptor_39b89822f939ca46, []int{21}
+}
+func (m *RoleBindingRestriction) XXX_Unmarshal(b []byte) error {
+	return m.Unmarshal(b)
+}
+func (m *RoleBindingRestriction) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	b = b[:cap(b)]
+	n, err := m.MarshalToSizedBuffer(b)
+	if err != nil {
+		return nil, err
+	}
+	return b[:n], nil
+}
+func (m *RoleBindingRestriction) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_RoleBindingRestriction.Merge(m, src)
+}
+func (m *RoleBindingRestriction) XXX_Size() int {
+	return m.Size()
+}
+func (m *RoleBindingRestriction) XXX_DiscardUnknown() {
+	xxx_messageInfo_RoleBindingRestriction.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_RoleBindingRestriction proto.InternalMessageInfo
+
+func (m *RoleBindingRestrictionList) Reset() { *m = RoleBindingRestrictionList{} }
+func (*RoleBindingRestrictionList) ProtoMessage() {}
+func (*RoleBindingRestrictionList) Descriptor() ([]byte, []int) {
+	return fileDescriptor_39b89822f939ca46, []int{22}
+}
+func (m *RoleBindingRestrictionList) XXX_Unmarshal(b []byte) error {
+	return m.Unmarshal(b)
+}
+func (m *RoleBindingRestrictionList) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	b = b[:cap(b)]
+	n, err := m.MarshalToSizedBuffer(b)
+	if err != nil {
+		return nil, err
+	}
+	return b[:n], nil
+}
+func (m *RoleBindingRestrictionList) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_RoleBindingRestrictionList.Merge(m, src)
+}
+func (m *RoleBindingRestrictionList) XXX_Size() int {
+	return m.Size()
+}
+func (m *RoleBindingRestrictionList) XXX_DiscardUnknown() {
+	xxx_messageInfo_RoleBindingRestrictionList.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_RoleBindingRestrictionList proto.InternalMessageInfo
+
+func (m *RoleBindingRestrictionSpec) Reset() { *m = RoleBindingRestrictionSpec{} }
+func (*RoleBindingRestrictionSpec) ProtoMessage() {}
+func (*RoleBindingRestrictionSpec) Descriptor() ([]byte, []int) {
+	return fileDescriptor_39b89822f939ca46, []int{23}
+}
+func (m *RoleBindingRestrictionSpec) XXX_Unmarshal(b []byte) error {
+	return m.Unmarshal(b)
+}
+func (m *RoleBindingRestrictionSpec) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	b = b[:cap(b)]
+	n, err := m.MarshalToSizedBuffer(b)
+	if err != nil {
+		return nil, err
+	}
+	return b[:n], nil
+}
+func (m *RoleBindingRestrictionSpec) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_RoleBindingRestrictionSpec.Merge(m, src)
+}
+func (m *RoleBindingRestrictionSpec) XXX_Size() int {
+	return m.Size()
+}
+func (m *RoleBindingRestrictionSpec) XXX_DiscardUnknown() {
+	xxx_messageInfo_RoleBindingRestrictionSpec.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_RoleBindingRestrictionSpec proto.InternalMessageInfo
+
+func (m *RoleList) Reset() { *m = RoleList{} }
+func (*RoleList) ProtoMessage() {}
+func (*RoleList) Descriptor() ([]byte, []int) {
+	return fileDescriptor_39b89822f939ca46, []int{24}
+}
+func (m *RoleList) XXX_Unmarshal(b []byte) error {
+	return m.Unmarshal(b)
+}
+func (m *RoleList) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	b = b[:cap(b)]
+	n, err := m.MarshalToSizedBuffer(b)
+	if err != nil {
+		return nil, err
+	}
+	return b[:n], nil
+}
+func (m *RoleList) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_RoleList.Merge(m, src)
+}
+func (m *RoleList) XXX_Size() int {
+	return m.Size()
+}
+func (m *RoleList) XXX_DiscardUnknown() {
+	xxx_messageInfo_RoleList.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_RoleList proto.InternalMessageInfo
+
+func (m *SelfSubjectRulesReview) Reset() { *m = SelfSubjectRulesReview{} }
+func (*SelfSubjectRulesReview) ProtoMessage() {}
+func (*SelfSubjectRulesReview) Descriptor() ([]byte, []int) {
+	return fileDescriptor_39b89822f939ca46, []int{25}
+}
+func (m *SelfSubjectRulesReview) XXX_Unmarshal(b []byte) error {
+	return m.Unmarshal(b)
+}
+func (m *SelfSubjectRulesReview) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	b = b[:cap(b)]
+	n, err := m.MarshalToSizedBuffer(b)
+	if err != nil {
+		return nil, err
+	}
+	return b[:n], nil
+}
+func (m *SelfSubjectRulesReview) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_SelfSubjectRulesReview.Merge(m, src)
+}
+func (m *SelfSubjectRulesReview) XXX_Size() int {
+	return m.Size()
+}
+func (m *SelfSubjectRulesReview) XXX_DiscardUnknown() {
+	xxx_messageInfo_SelfSubjectRulesReview.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_SelfSubjectRulesReview proto.InternalMessageInfo
+
+func (m *SelfSubjectRulesReviewSpec) Reset() { *m = SelfSubjectRulesReviewSpec{} }
+func (*SelfSubjectRulesReviewSpec) ProtoMessage() {}
+func (*SelfSubjectRulesReviewSpec) Descriptor() ([]byte, []int) {
+	return fileDescriptor_39b89822f939ca46, []int{26}
+}
+func (m *SelfSubjectRulesReviewSpec) XXX_Unmarshal(b []byte) error {
+	return m.Unmarshal(b)
+}
+func (m *SelfSubjectRulesReviewSpec) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	b = b[:cap(b)]
+	n, err := m.MarshalToSizedBuffer(b)
+	if err != nil {
+		return nil, err
+	}
+	return b[:n], nil
+}
+func (m *SelfSubjectRulesReviewSpec) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_SelfSubjectRulesReviewSpec.Merge(m, src)
+}
+func (m *SelfSubjectRulesReviewSpec) XXX_Size() int {
+	return m.Size()
+}
+func (m *SelfSubjectRulesReviewSpec) XXX_DiscardUnknown() {
+	xxx_messageInfo_SelfSubjectRulesReviewSpec.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_SelfSubjectRulesReviewSpec proto.InternalMessageInfo
+
+func (m *ServiceAccountReference) Reset() { *m = ServiceAccountReference{} }
+func (*ServiceAccountReference) ProtoMessage() {}
+func (*ServiceAccountReference) Descriptor() ([]byte, []int) {
+	return fileDescriptor_39b89822f939ca46, []int{27}
+}
+func (m *ServiceAccountReference) XXX_Unmarshal(b []byte) error {
+	return m.Unmarshal(b)
+}
+func (m *ServiceAccountReference) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	b = b[:cap(b)]
+	n, err := m.MarshalToSizedBuffer(b)
+	if err != nil {
+		return nil, err
+	}
+	return b[:n], nil
+}
+func (m *ServiceAccountReference) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_ServiceAccountReference.Merge(m, src)
+}
+func (m *ServiceAccountReference) XXX_Size() int {
+	return m.Size()
+}
+func (m *ServiceAccountReference) XXX_DiscardUnknown() {
+	xxx_messageInfo_ServiceAccountReference.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_ServiceAccountReference proto.InternalMessageInfo
+
+func (m *ServiceAccountRestriction) Reset() { *m = ServiceAccountRestriction{} }
+func (*ServiceAccountRestriction) ProtoMessage() {}
+func (*ServiceAccountRestriction) Descriptor() ([]byte, []int) {
+	return fileDescriptor_39b89822f939ca46, []int{28}
+}
+func (m *ServiceAccountRestriction) XXX_Unmarshal(b []byte) error {
+	return m.Unmarshal(b)
+}
+func (m *ServiceAccountRestriction) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	b = b[:cap(b)]
+	n, err := m.MarshalToSizedBuffer(b)
+	if err != nil {
+		return nil, err
+	}
+	return b[:n], nil
+}
+func (m *ServiceAccountRestriction) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_ServiceAccountRestriction.Merge(m, src)
+}
+func (m *ServiceAccountRestriction) XXX_Size() int {
+	return m.Size()
+}
+func (m *ServiceAccountRestriction) XXX_DiscardUnknown() {
+	xxx_messageInfo_ServiceAccountRestriction.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_ServiceAccountRestriction proto.InternalMessageInfo
+
+func (m *SubjectAccessReview) Reset() { *m = SubjectAccessReview{} }
+func (*SubjectAccessReview) ProtoMessage() {}
+func (*SubjectAccessReview) Descriptor() ([]byte, []int) {
+	return fileDescriptor_39b89822f939ca46, []int{29}
+}
+func (m *SubjectAccessReview) XXX_Unmarshal(b []byte) error {
+	return m.Unmarshal(b)
+}
+func (m *SubjectAccessReview) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	b = b[:cap(b)]
+	n, err := m.MarshalToSizedBuffer(b)
+	if err != nil {
+		return nil, err
+	}
+	return b[:n], nil
+}
+func (m *SubjectAccessReview) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_SubjectAccessReview.Merge(m, src)
+}
+func (m *SubjectAccessReview) XXX_Size() int {
+	return m.Size()
+}
+func (m *SubjectAccessReview) XXX_DiscardUnknown() {
+	xxx_messageInfo_SubjectAccessReview.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_SubjectAccessReview proto.InternalMessageInfo
+
+func (m *SubjectAccessReviewResponse) Reset() { *m = SubjectAccessReviewResponse{} }
+func (*SubjectAccessReviewResponse) ProtoMessage() {}
+func (*SubjectAccessReviewResponse) Descriptor() ([]byte, []int) {
+	return fileDescriptor_39b89822f939ca46, []int{30}
+}
+func (m *SubjectAccessReviewResponse) XXX_Unmarshal(b []byte) error {
+	return m.Unmarshal(b)
+}
+func (m *SubjectAccessReviewResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	b = b[:cap(b)]
+	n, err := m.MarshalToSizedBuffer(b)
+	if err != nil {
+		return nil, err
+	}
+	return b[:n], nil
+}
+func (m *SubjectAccessReviewResponse) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_SubjectAccessReviewResponse.Merge(m, src)
+}
+func (m *SubjectAccessReviewResponse) XXX_Size() int {
+	return m.Size()
+}
+func (m *SubjectAccessReviewResponse) XXX_DiscardUnknown() {
+	xxx_messageInfo_SubjectAccessReviewResponse.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_SubjectAccessReviewResponse proto.InternalMessageInfo
+
+func (m *SubjectRulesReview) Reset() { *m = SubjectRulesReview{} }
+func (*SubjectRulesReview) ProtoMessage() {}
+func (*SubjectRulesReview) Descriptor() ([]byte, []int) {
+	return fileDescriptor_39b89822f939ca46, []int{31}
+}
+func (m *SubjectRulesReview) XXX_Unmarshal(b []byte) error {
+	return m.Unmarshal(b)
+}
+func (m *SubjectRulesReview) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	b = b[:cap(b)]
+	n, err := m.MarshalToSizedBuffer(b)
+	if err != nil {
+		return nil, err
+	}
+	return b[:n], nil
+}
+func (m *SubjectRulesReview) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_SubjectRulesReview.Merge(m, src)
+}
+func (m *SubjectRulesReview) XXX_Size() int {
+	return m.Size()
+}
+func (m *SubjectRulesReview) XXX_DiscardUnknown() {
+	xxx_messageInfo_SubjectRulesReview.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_SubjectRulesReview proto.InternalMessageInfo
+
+func (m *SubjectRulesReviewSpec) Reset() { *m = SubjectRulesReviewSpec{} }
+func (*SubjectRulesReviewSpec) ProtoMessage() {}
+func (*SubjectRulesReviewSpec) Descriptor() ([]byte, []int) {
+	return fileDescriptor_39b89822f939ca46, []int{32}
+}
+func (m *SubjectRulesReviewSpec) XXX_Unmarshal(b []byte) error {
+	return m.Unmarshal(b)
+}
+func (m *SubjectRulesReviewSpec) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	b = b[:cap(b)]
+	n, err := m.MarshalToSizedBuffer(b)
+	if err != nil {
+		return nil, err
+	}
+	return b[:n], nil
+}
+func (m *SubjectRulesReviewSpec) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_SubjectRulesReviewSpec.Merge(m, src)
+}
+func (m *SubjectRulesReviewSpec) XXX_Size() int {
+	return m.Size()
+}
+func (m *SubjectRulesReviewSpec) XXX_DiscardUnknown() {
+	xxx_messageInfo_SubjectRulesReviewSpec.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_SubjectRulesReviewSpec proto.InternalMessageInfo
+
+func (m *SubjectRulesReviewStatus) Reset() { *m = SubjectRulesReviewStatus{} }
+func (*SubjectRulesReviewStatus) ProtoMessage() {}
+func (*SubjectRulesReviewStatus) Descriptor() ([]byte, []int) {
+	return fileDescriptor_39b89822f939ca46, []int{33}
+}
+func (m *SubjectRulesReviewStatus) XXX_Unmarshal(b []byte) error {
+	return m.Unmarshal(b)
+}
+func (m *SubjectRulesReviewStatus) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	b = b[:cap(b)]
+	n, err := m.MarshalToSizedBuffer(b)
+	if err != nil {
+		return nil, err
+	}
+	return b[:n], nil
+}
+func (m *SubjectRulesReviewStatus) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_SubjectRulesReviewStatus.Merge(m, src)
+}
+func (m *SubjectRulesReviewStatus) XXX_Size() int {
+	return m.Size()
+}
+func (m *SubjectRulesReviewStatus) XXX_DiscardUnknown() {
+	xxx_messageInfo_SubjectRulesReviewStatus.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_SubjectRulesReviewStatus proto.InternalMessageInfo
+
+func (m *UserRestriction) Reset() { *m = UserRestriction{} }
+func (*UserRestriction) ProtoMessage() {}
+func (*UserRestriction) Descriptor() ([]byte, []int) {
+	return fileDescriptor_39b89822f939ca46, []int{34}
+}
+func (m *UserRestriction) XXX_Unmarshal(b []byte) error {
+	return m.Unmarshal(b)
+}
+func (m *UserRestriction) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	b = b[:cap(b)]
+	n, err := m.MarshalToSizedBuffer(b)
+	if err != nil {
+		return nil, err
+	}
+	return b[:n], nil
+}
+func (m *UserRestriction) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_UserRestriction.Merge(m, src)
+}
+func (m *UserRestriction) XXX_Size() int {
+	return m.Size()
+}
+func (m *UserRestriction) XXX_DiscardUnknown() {
+	xxx_messageInfo_UserRestriction.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_UserRestriction proto.InternalMessageInfo
+
+func init() {
+	proto.RegisterType((*Action)(nil), "github.com.openshift.api.authorization.v1.Action")
+	proto.RegisterType((*ClusterRole)(nil), "github.com.openshift.api.authorization.v1.ClusterRole")
+	proto.RegisterType((*ClusterRoleBinding)(nil), "github.com.openshift.api.authorization.v1.ClusterRoleBinding")
+	proto.RegisterType((*ClusterRoleBindingList)(nil), "github.com.openshift.api.authorization.v1.ClusterRoleBindingList")
+	proto.RegisterType((*ClusterRoleList)(nil), "github.com.openshift.api.authorization.v1.ClusterRoleList")
+	proto.RegisterType((*GroupRestriction)(nil), "github.com.openshift.api.authorization.v1.GroupRestriction")
+	proto.RegisterType((*IsPersonalSubjectAccessReview)(nil), "github.com.openshift.api.authorization.v1.IsPersonalSubjectAccessReview")
+	proto.RegisterType((*LocalResourceAccessReview)(nil), "github.com.openshift.api.authorization.v1.LocalResourceAccessReview")
+	proto.RegisterType((*LocalSubjectAccessReview)(nil), "github.com.openshift.api.authorization.v1.LocalSubjectAccessReview")
+	proto.RegisterType((*NamedClusterRole)(nil), "github.com.openshift.api.authorization.v1.NamedClusterRole")
+	proto.RegisterType((*NamedClusterRoleBinding)(nil), "github.com.openshift.api.authorization.v1.NamedClusterRoleBinding")
+	proto.RegisterType((*NamedRole)(nil), "github.com.openshift.api.authorization.v1.NamedRole")
+	proto.RegisterType((*NamedRoleBinding)(nil), "github.com.openshift.api.authorization.v1.NamedRoleBinding")
+	proto.RegisterType((*OptionalNames)(nil), "github.com.openshift.api.authorization.v1.OptionalNames")
+	proto.RegisterType((*OptionalScopes)(nil), "github.com.openshift.api.authorization.v1.OptionalScopes")
+	proto.RegisterType((*PolicyRule)(nil), "github.com.openshift.api.authorization.v1.PolicyRule")
+	proto.RegisterType((*ResourceAccessReview)(nil), "github.com.openshift.api.authorization.v1.ResourceAccessReview")
+	proto.RegisterType((*ResourceAccessReviewResponse)(nil), "github.com.openshift.api.authorization.v1.ResourceAccessReviewResponse")
+	proto.RegisterType((*Role)(nil), "github.com.openshift.api.authorization.v1.Role")
+	proto.RegisterType((*RoleBinding)(nil), "github.com.openshift.api.authorization.v1.RoleBinding")
+	proto.RegisterType((*RoleBindingList)(nil), "github.com.openshift.api.authorization.v1.RoleBindingList")
+	proto.RegisterType((*RoleBindingRestriction)(nil), "github.com.openshift.api.authorization.v1.RoleBindingRestriction")
+	proto.RegisterType((*RoleBindingRestrictionList)(nil), "github.com.openshift.api.authorization.v1.RoleBindingRestrictionList")
+	proto.RegisterType((*RoleBindingRestrictionSpec)(nil), "github.com.openshift.api.authorization.v1.RoleBindingRestrictionSpec")
+	proto.RegisterType((*RoleList)(nil), "github.com.openshift.api.authorization.v1.RoleList")
+	proto.RegisterType((*SelfSubjectRulesReview)(nil), "github.com.openshift.api.authorization.v1.SelfSubjectRulesReview")
+	proto.RegisterType((*SelfSubjectRulesReviewSpec)(nil), "github.com.openshift.api.authorization.v1.SelfSubjectRulesReviewSpec")
+	proto.RegisterType((*ServiceAccountReference)(nil), "github.com.openshift.api.authorization.v1.ServiceAccountReference")
+	proto.RegisterType((*ServiceAccountRestriction)(nil), "github.com.openshift.api.authorization.v1.ServiceAccountRestriction")
+	proto.RegisterType((*SubjectAccessReview)(nil), "github.com.openshift.api.authorization.v1.SubjectAccessReview")
+	proto.RegisterType((*SubjectAccessReviewResponse)(nil), "github.com.openshift.api.authorization.v1.SubjectAccessReviewResponse")
+	proto.RegisterType((*SubjectRulesReview)(nil), "github.com.openshift.api.authorization.v1.SubjectRulesReview")
+	proto.RegisterType((*SubjectRulesReviewSpec)(nil), "github.com.openshift.api.authorization.v1.SubjectRulesReviewSpec")
+	proto.RegisterType((*SubjectRulesReviewStatus)(nil), "github.com.openshift.api.authorization.v1.SubjectRulesReviewStatus")
+	proto.RegisterType((*UserRestriction)(nil), "github.com.openshift.api.authorization.v1.UserRestriction")
+}
+
+func init() {
+	proto.RegisterFile("github.com/openshift/api/authorization/v1/generated.proto", fileDescriptor_39b89822f939ca46)
+}
+
+var fileDescriptor_39b89822f939ca46 = []byte{
+	// 1821 bytes of a gzipped FileDescriptorProto
+	0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xec, 0x19, 0xcd, 0x6f, 0x1b, 0x59,
+	0x3d, 0xcf, 0x76, 0x1c, 0xfb, 0xe7, 0x26, 0xce, 0xbe, 0x66, 0xdb, 0x69, 0xa0, 0xb6, 0x35, 0x20,
+	0xc8, 0x0a, 0x76, 0x4c, 0x02, 0x94, 0xb6, 0x2b, 0xb4, 0xb2, 0xbb, 0x51, 0x15, 0xa9, 0xb4, 0xd9,
+	0x17, 0x76, 0xb5, 0x5a, 0x3e, 0xc4, 0x78, 0xf2, 0x62, 0x0f, 0x19, 0xcf, 0x58, 0xf3, 0xc6, 0x2e,
+	0x05, 0x21, 0x15, 0x24, 0x0e, 0x5c, 0xd0, 0x5e, 0x40, 0x1c, 0x41, 0xfc, 0x01, 0x88, 0x0b, 0x12,
+	0x48, 0x70, 0xe2, 0xd0, 0x03, 0x87, 0x95, 0xb8, 0x54, 0x08, 0x19, 0xea, 0x22, 0x0e, 0x1c, 0xf8,
+	0x1b, 0xd0, 0x7b, 0xf3, 0xc6, 0xf3, 0xe1, 0xb1, 0xe2, 0x49, 0x9a, 0x08, 0x56, 0xbd, 0x79, 0xde,
+	0xef, 0xfb, 0xf3, 0xfd, 0x7e, 0xcf, 0x70, 0xab, 0x6b, 0x7a, 0xbd, 0x61, 0x47, 0x33, 0x9c, 0x7e,
+	0xd3, 0x19, 0x50, 0x9b, 0xf5, 0xcc, 0x23, 0xaf, 0xa9, 0x0f, 0xcc, 0xa6, 0x3e, 0xf4, 0x7a, 0x8e,
+	0x6b, 0x7e, 0x57, 0xf7, 0x4c, 0xc7, 0x6e, 0x8e, 0xb6, 0x9b, 0x5d, 0x6a, 0x53, 0x57, 0xf7, 0xe8,
+	0xa1, 0x36, 0x70, 0x1d, 0xcf, 0xc1, 0xaf, 0x85, 0xa4, 0xda, 0x94, 0x54, 0xd3, 0x07, 0xa6, 0x16,
+	0x23, 0xd5, 0x46, 0xdb, 0x9b, 0xaf, 0x47, 0xa4, 0x74, 0x9d, 0xae, 0xd3, 0x14, 0x1c, 0x3a, 0xc3,
+	0x23, 0xf1, 0x25, 0x3e, 0xc4, 0x2f, 0x9f, 0xf3, 0xa6, 0x7a, 0x7c, 0x93, 0x69, 0xa6, 0x23, 0xd4,
+	0x30, 0x1c, 0x97, 0xa6, 0x48, 0x8f, 0xe1, 0xb8, 0x1d, 0xdd, 0x48, 0xc3, 0xf9, 0x42, 0x88, 0xd3,
+	0xd7, 0x8d, 0x9e, 0x69, 0x53, 0xf7, 0x51, 0x73, 0x70, 0xdc, 0xe5, 0x07, 0xac, 0xd9, 0xa7, 0x9e,
+	0x9e, 0x46, 0xd5, 0x9c, 0x47, 0xe5, 0x0e, 0x6d, 0xcf, 0xec, 0xd3, 0x19, 0x82, 0x1b, 0x27, 0x11,
+	0x30, 0xa3, 0x47, 0xfb, 0x7a, 0x92, 0x4e, 0xfd, 0x41, 0x01, 0x8a, 0x2d, 0x83, 0xfb, 0x08, 0x37,
+	0xa1, 0x6c, 0xeb, 0x7d, 0xca, 0x06, 0xba, 0x41, 0x15, 0xd4, 0x40, 0x5b, 0xe5, 0xf6, 0x2b, 0x4f,
+	0xc6, 0xf5, 0xa5, 0xc9, 0xb8, 0x5e, 0xbe, 0x1f, 0x00, 0x48, 0x88, 0x83, 0x1b, 0x50, 0x18, 0x51,
+	0xb7, 0xa3, 0xe4, 0x04, 0xee, 0x25, 0x89, 0x5b, 0x78, 0x97, 0xba, 0x1d, 0x22, 0x20, 0xf8, 0x16,
+	0xac, 0xbb, 0x94, 0x39, 0x43, 0xd7, 0xa0, 0xad, 0xfd, 0xbd, 0xbb, 0xae, 0x33, 0x1c, 0x28, 0x79,
+	0x81, 0xbd, 0x2a, 0xb1, 0x97, 0xc5, 0x21, 0x99, 0x41, 0xc3, 0x6f, 0x02, 0x8e, 0x9c, 0xbd, 0x4b,
+	0x5d, 0x66, 0x3a, 0xb6, 0x52, 0x10, 0xc4, 0x55, 0x49, 0xbc, 0x22, 0x8f, 0x49, 0x0a, 0x2a, 0xfe,
+	0x2c, 0x94, 0x82, 0x53, 0x65, 0x59, 0x90, 0xad, 0x4b, 0xb2, 0x12, 0x91, 0xe7, 0x64, 0x8a, 0x81,
+	0x6f, 0xc2, 0xa5, 0xe0, 0x37, 0xb7, 0x55, 0x29, 0x0a, 0x8a, 0x0d, 0x49, 0x71, 0x89, 0x44, 0x60,
+	0x24, 0x86, 0xc9, 0xbd, 0x30, 0xd0, 0xbd, 0x9e, 0x52, 0x8a, 0x7b, 0x61, 0x5f, 0xf7, 0x7a, 0x44,
+	0x40, 0xf0, 0x5b, 0xb0, 0x6e, 0xb2, 0xfb, 0x8e, 0x1d, 0x30, 0x79, 0x87, 0xdc, 0x53, 0xca, 0x0d,
+	0xb4, 0x55, 0x6a, 0x2b, 0x12, 0x7b, 0x7d, 0x2f, 0x01, 0x27, 0x33, 0x14, 0xf8, 0x3d, 0x58, 0x31,
+	0x1c, 0xdb, 0xa3, 0xb6, 0xa7, 0xac, 0x34, 0xd0, 0x56, 0x65, 0xe7, 0x75, 0xcd, 0x8f, 0xb9, 0x16,
+	0x8d, 0xb9, 0x36, 0x38, 0xee, 0x6a, 0x32, 0xe6, 0x1a, 0xd1, 0x1f, 0xee, 0x7e, 0xc7, 0xa3, 0x36,
+	0xf7, 0x47, 0xe8, 0xb4, 0x3b, 0x3e, 0x17, 0x12, 0xb0, 0x53, 0x7f, 0x9d, 0x83, 0xca, 0x1d, 0x6b,
+	0xc8, 0x3c, 0xea, 0x12, 0xc7, 0xa2, 0xf8, 0x5b, 0x50, 0xe2, 0x79, 0x79, 0xa8, 0x7b, 0xba, 0xc8,
+	0x83, 0xca, 0xce, 0xe7, 0xe6, 0x8a, 0xe2, 0x59, 0xac, 0x71, 0x6c, 0x6d, 0xb4, 0xad, 0x3d, 0xe8,
+	0x7c, 0x9b, 0x1a, 0xde, 0x57, 0xa8, 0xa7, 0xb7, 0xb1, 0x94, 0x06, 0xe1, 0x19, 0x99, 0x72, 0xc5,
+	0xef, 0xc3, 0xb2, 0x3b, 0xb4, 0x28, 0x53, 0x72, 0x8d, 0xfc, 0x56, 0x65, 0xe7, 0x8b, 0xda, 0xc2,
+	0x65, 0xac, 0xed, 0x3b, 0x96, 0x69, 0x3c, 0x22, 0x43, 0x8b, 0x86, 0x39, 0xc4, 0xbf, 0x18, 0xf1,
+	0x59, 0xe2, 0x0e, 0x54, 0xf5, 0x6e, 0xd7, 0xa5, 0x5d, 0x41, 0xc2, 0x41, 0x22, 0xe5, 0x2a, 0x3b,
+	0x9f, 0x88, 0x18, 0xa1, 0xf1, 0x72, 0xe5, 0xec, 0x5a, 0x71, 0xd4, 0xf6, 0xe5, 0xc9, 0xb8, 0x5e,
+	0x4d, 0x1c, 0x92, 0x24, 0x43, 0xf5, 0xdf, 0x79, 0xc0, 0x11, 0x8f, 0xb5, 0x4d, 0xfb, 0xd0, 0xb4,
+	0xbb, 0x17, 0xe0, 0x38, 0x0a, 0xe5, 0x21, 0xa3, 0xae, 0x28, 0x47, 0x51, 0x77, 0x95, 0x9d, 0x9b,
+	0x19, 0x9c, 0xf7, 0x60, 0xc0, 0x7f, 0xe9, 0x96, 0xa0, 0x6f, 0xaf, 0xf2, 0xca, 0x7e, 0x27, 0x60,
+	0x47, 0x42, 0xce, 0xb8, 0x07, 0xd0, 0xe5, 0x55, 0xe8, 0xcb, 0xc9, 0x9f, 0x51, 0xce, 0x1a, 0x37,
+	0xe7, 0xee, 0x94, 0x1f, 0x89, 0xf0, 0xc6, 0x6f, 0x43, 0x89, 0x0d, 0x85, 0xa5, 0x4c, 0x29, 0x88,
+	0x64, 0x88, 0x85, 0x89, 0x77, 0xde, 0xd0, 0x41, 0x84, 0x1e, 0x51, 0x97, 0xda, 0x06, 0x0d, 0x4b,
+	0xf9, 0x40, 0x12, 0x93, 0x29, 0x1b, 0x7c, 0x1f, 0x56, 0x5c, 0xc7, 0xa2, 0x84, 0x1e, 0x89, 0xba,
+	0x5f, 0x90, 0xe3, 0xb4, 0x3c, 0x88, 0x4f, 0x4b, 0x02, 0x26, 0xea, 0x5f, 0x11, 0x5c, 0x99, 0x0d,
+	0xf6, 0x3d, 0x93, 0x79, 0xf8, 0xeb, 0x33, 0x01, 0xd7, 0x16, 0x0b, 0x38, 0xa7, 0x16, 0xe1, 0x9e,
+	0x1a, 0x12, 0x9c, 0x44, 0x82, 0xdd, 0x81, 0x65, 0xd3, 0xa3, 0xfd, 0xa0, 0x4a, 0xbe, 0x9c, 0x21,
+	0x00, 0xb3, 0xfa, 0x86, 0xd5, 0xb2, 0xc7, 0x79, 0x12, 0x9f, 0xb5, 0xfa, 0x67, 0x04, 0xd5, 0x08,
+	0xf2, 0x05, 0x58, 0xf5, 0xb5, 0xb8, 0x55, 0x37, 0x4e, 0x69, 0x55, 0xba, 0x39, 0x3f, 0x43, 0xb0,
+	0xee, 0xdf, 0x28, 0x94, 0x79, 0xae, 0xe9, 0x5f, 0x6c, 0x2a, 0x14, 0x45, 0xc6, 0x31, 0x05, 0x35,
+	0xf2, 0x5b, 0xe5, 0x36, 0x4c, 0xc6, 0xf5, 0xa2, 0xc0, 0x62, 0x44, 0x42, 0xf0, 0x37, 0xa1, 0x68,
+	0xe9, 0x1d, 0x6a, 0x05, 0x6a, 0x7d, 0x7e, 0x41, 0x8b, 0x39, 0xcd, 0x01, 0xb5, 0xa8, 0xe1, 0x39,
+	0x6e, 0x78, 0x5d, 0x06, 0x27, 0x8c, 0x48, 0xae, 0x6a, 0x1d, 0xae, 0xef, 0xb1, 0x7d, 0xea, 0x32,
+	0x5e, 0x16, 0x32, 0x69, 0x5b, 0x86, 0x41, 0x19, 0x23, 0x74, 0x64, 0xd2, 0x87, 0xaa, 0x05, 0xd7,
+	0xee, 0x39, 0x86, 0x6e, 0x05, 0x2d, 0x3f, 0x0a, 0xc4, 0x0f, 0x82, 0x4b, 0x5a, 0xc6, 0x63, 0x3b,
+	0x83, 0xd3, 0x7c, 0xc2, 0x76, 0x81, 0xeb, 0x46, 0x8a, 0xba, 0xf8, 0x52, 0x7f, 0x9a, 0x03, 0x45,
+	0x88, 0x4b, 0x51, 0xe5, 0x85, 0x4b, 0xe3, 0x57, 0x24, 0xef, 0x2d, 0xc9, 0x41, 0x81, 0xb7, 0x1e,
+	0x22, 0x20, 0xf8, 0xd3, 0xd3, 0x10, 0xe5, 0x45, 0x88, 0xaa, 0x93, 0x71, 0xbd, 0xe2, 0x87, 0xe8,
+	0xc0, 0x32, 0x0d, 0x3a, 0x8d, 0xd3, 0x37, 0xa0, 0xc8, 0x0c, 0x67, 0x40, 0x99, 0x18, 0x05, 0x2a,
+	0x3b, 0xb7, 0x4e, 0xd1, 0x95, 0x0e, 0x04, 0x03, 0x3f, 0x0d, 0xfc, 0xdf, 0x44, 0x32, 0x55, 0x7f,
+	0x82, 0x60, 0x9d, 0x37, 0xa6, 0xc3, 0xe8, 0x7d, 0xd8, 0x80, 0x02, 0x1f, 0x7a, 0xe4, 0x4c, 0x34,
+	0x55, 0x5f, 0xcc, 0x02, 0x02, 0x82, 0xdf, 0x83, 0x02, 0xef, 0x16, 0xb2, 0x23, 0x9f, 0x36, 0xa5,
+	0xa7, 0x9c, 0x45, 0x0b, 0x12, 0x1c, 0xd5, 0xdf, 0x20, 0xb8, 0x9a, 0x54, 0x28, 0xb8, 0x6e, 0x4e,
+	0xd6, 0xcb, 0x83, 0x8a, 0x1b, 0x12, 0x48, 0xf5, 0xce, 0xd8, 0x47, 0x2e, 0x4b, 0x39, 0x95, 0xc8,
+	0x21, 0x89, 0x8a, 0x51, 0x1f, 0x23, 0x10, 0x03, 0xe3, 0xe1, 0x82, 0xde, 0x7b, 0x3b, 0xe6, 0xbd,
+	0x66, 0x06, 0xf5, 0xe6, 0xba, 0xed, 0x57, 0x41, 0x1c, 0xb3, 0xf9, 0xab, 0x9f, 0xe6, 0xaf, 0x1b,
+	0x59, 0x15, 0x5a, 0xd8, 0x51, 0xb7, 0x61, 0x35, 0x76, 0x53, 0xe2, 0x7a, 0xd0, 0x1b, 0xfd, 0x46,
+	0x55, 0x4e, 0xf6, 0xb7, 0xdb, 0xa5, 0x9f, 0xff, 0xa2, 0xbe, 0xf4, 0xf8, 0x6f, 0x8d, 0x25, 0xf5,
+	0x0d, 0x58, 0x8b, 0xe7, 0x73, 0x16, 0xe2, 0x1f, 0xe7, 0x01, 0xc2, 0x41, 0x8a, 0x53, 0xf2, 0x71,
+	0x3d, 0x46, 0xc9, 0xa7, 0x78, 0x46, 0xfc, 0x73, 0xfc, 0x43, 0x04, 0xaf, 0xea, 0x9e, 0xe7, 0x9a,
+	0x9d, 0xa1, 0x47, 0x23, 0xad, 0x35, 0x98, 0x41, 0x32, 0x8e, 0xa2, 0xd7, 0xa5, 0x67, 0x5e, 0x6d,
+	0xa5, 0xf1, 0x24, 0xe9, 0xa2, 0xf0, 0x67, 0xa0, 0xac, 0x0f, 0xcc, 0xbb, 0xd1, 0x36, 0x21, 0x26,
+	0x98, 0x60, 0x65, 0x60, 0x24, 0x84, 0x73, 0xe4, 0x60, 0x4a, 0xf7, 0x07, 0x0b, 0x89, 0x1c, 0xb4,
+	0x57, 0x46, 0x42, 0x38, 0xfe, 0x12, 0xac, 0x46, 0x47, 0x7a, 0xa6, 0x2c, 0x0b, 0x82, 0x57, 0x26,
+	0xe3, 0xfa, 0x6a, 0x74, 0xf2, 0x67, 0x24, 0x8e, 0x87, 0xdb, 0x50, 0xb5, 0x63, 0x53, 0x3a, 0x53,
+	0x8a, 0x82, 0x54, 0x99, 0x8c, 0xeb, 0x1b, 0xf1, 0x01, 0x5e, 0x36, 0xb2, 0x24, 0x81, 0xda, 0x85,
+	0x8d, 0x8b, 0xe9, 0xf9, 0x7f, 0x47, 0xf0, 0xf1, 0x34, 0x49, 0x84, 0xb2, 0x81, 0x63, 0x33, 0x9a,
+	0x7d, 0x01, 0xfc, 0x24, 0x2c, 0xf3, 0xee, 0xed, 0xdf, 0x99, 0x65, 0x7f, 0xce, 0xe3, 0x4d, 0x5d,
+	0x9a, 0xea, 0x03, 0x17, 0xef, 0xed, 0x6f, 0xc2, 0x1a, 0x1d, 0xe9, 0xd6, 0x90, 0x6b, 0xbb, 0xeb,
+	0xba, 0x8e, 0x2b, 0xd7, 0xbd, 0xab, 0x52, 0x89, 0xea, 0x2e, 0x87, 0xea, 0x53, 0x30, 0x49, 0xa0,
+	0xab, 0x7f, 0x42, 0x50, 0xf8, 0xff, 0xdf, 0x60, 0xd4, 0xe7, 0x79, 0xa8, 0xbc, 0x5c, 0x2b, 0x3e,
+	0xea, 0x6b, 0x05, 0x9f, 0xbc, 0x2f, 0x76, 0x9f, 0x38, 0xc3, 0xe4, 0x7d, 0xf2, 0x22, 0xf1, 0x1c,
+	0xc1, 0x95, 0xe8, 0x45, 0x17, 0x99, 0xbf, 0xcf, 0x3f, 0x7f, 0xbb, 0x50, 0x60, 0x03, 0x6a, 0xc8,
+	0xd4, 0xdd, 0x3d, 0x9d, 0x61, 0x11, 0x95, 0x0f, 0x06, 0xd4, 0x08, 0x07, 0x04, 0xfe, 0x45, 0x84,
+	0x00, 0x75, 0x82, 0x60, 0x33, 0x9d, 0xe4, 0x02, 0xe2, 0x77, 0x14, 0x8f, 0x5f, 0xeb, 0xcc, 0x66,
+	0xce, 0x09, 0xe5, 0xef, 0xf3, 0xf3, 0x8c, 0xe4, 0x9e, 0xc0, 0x8f, 0xa0, 0xca, 0x4b, 0xda, 0x0d,
+	0x8f, 0xa5, 0xad, 0xb7, 0x33, 0x28, 0x24, 0x66, 0xff, 0x88, 0x26, 0xe2, 0xdd, 0x25, 0x71, 0x48,
+	0x92, 0x72, 0xf0, 0xf7, 0x61, 0x5d, 0x14, 0x79, 0x54, 0xb6, 0x1f, 0xf3, 0x37, 0x32, 0xc8, 0x4e,
+	0x2e, 0x88, 0xed, 0x8d, 0xc9, 0xb8, 0x3e, 0xb3, 0x36, 0x92, 0x19, 0x51, 0xf8, 0x97, 0x08, 0xae,
+	0x31, 0xea, 0x8e, 0x4c, 0x83, 0xea, 0x86, 0xe1, 0x0c, 0x6d, 0x2f, 0xaa, 0x88, 0xdf, 0xcf, 0xde,
+	0xca, 0xa0, 0xc8, 0x81, 0xcf, 0xab, 0xe5, 0xf3, 0x8a, 0x6a, 0x74, 0x7d, 0x32, 0xae, 0x5f, 0x9b,
+	0x0b, 0x26, 0xf3, 0xb5, 0x50, 0xff, 0x88, 0xa0, 0x74, 0x41, 0x9b, 0xfc, 0x57, 0xe3, 0xf9, 0x98,
+	0x79, 0x70, 0x4f, 0xcf, 0xbe, 0xff, 0x20, 0xb8, 0x72, 0x40, 0xad, 0x23, 0xd9, 0x82, 0xfd, 0x9b,
+	0xd1, 0x1f, 0x89, 0x82, 0x32, 0x47, 0x99, 0xcb, 0x3c, 0x9d, 0xe1, 0xbc, 0x32, 0xc7, 0xc7, 0x50,
+	0x64, 0x9e, 0xee, 0x0d, 0x83, 0xcb, 0xf0, 0x4e, 0x16, 0x51, 0xb3, 0x62, 0x04, 0xab, 0xf6, 0x9a,
+	0x14, 0x54, 0xf4, 0xbf, 0x89, 0x14, 0xa1, 0x7e, 0x0f, 0x36, 0xe7, 0xab, 0x17, 0x59, 0x78, 0xd1,
+	0x79, 0x2c, 0xbc, 0x16, 0x5c, 0x4d, 0xa6, 0x99, 0xbc, 0xba, 0x16, 0x58, 0x97, 0x62, 0x03, 0x63,
+	0xee, 0xe4, 0x81, 0x51, 0xfd, 0x0b, 0x82, 0xf9, 0x59, 0x8d, 0x7f, 0x84, 0xa0, 0x1a, 0x4f, 0x6c,
+	0x7f, 0x23, 0xa9, 0xec, 0xb4, 0xcf, 0x50, 0x54, 0xc1, 0x4d, 0x3c, 0x9d, 0x22, 0xe3, 0x08, 0x8c,
+	0x24, 0x65, 0x62, 0x0d, 0x60, 0xaa, 0x72, 0x6c, 0xb6, 0x9d, 0xda, 0xc4, 0x48, 0x04, 0x43, 0xfd,
+	0x20, 0x07, 0x97, 0x5f, 0xbe, 0xa3, 0xc4, 0xd2, 0xea, 0x9f, 0x08, 0x3e, 0x96, 0xe2, 0x92, 0xd3,
+	0xaf, 0x1a, 0xaf, 0xc1, 0x8a, 0x6e, 0x59, 0xce, 0x43, 0x7a, 0x28, 0xac, 0x2f, 0x85, 0x83, 0x55,
+	0xcb, 0x3f, 0x26, 0x01, 0x1c, 0x7f, 0x0a, 0x8a, 0x2e, 0xd5, 0x99, 0xec, 0xc8, 0xe5, 0xb0, 0xee,
+	0x88, 0x38, 0x25, 0x12, 0x8a, 0x5b, 0x50, 0xa5, 0xf1, 0x85, 0xe2, 0xa4, 0x7d, 0x23, 0x89, 0xaf,
+	0xfe, 0x0b, 0x01, 0x4e, 0xe9, 0x53, 0x46, 0xac, 0x4f, 0xb5, 0xce, 0xd6, 0x3c, 0xfe, 0x27, 0x7a,
+	0xd4, 0x1f, 0x78, 0x53, 0x4e, 0x6f, 0x50, 0x41, 0x52, 0xa2, 0xb9, 0x49, 0x19, 0xbe, 0xbf, 0xe6,
+	0xe6, 0xbe, 0xbf, 0x86, 0xf9, 0x98, 0x3f, 0x8f, 0x7c, 0xfc, 0x1d, 0x02, 0x65, 0x9e, 0xd1, 0xe1,
+	0x2e, 0x87, 0x5e, 0xfc, 0xbf, 0x51, 0x29, 0x49, 0x96, 0xcb, 0x98, 0x64, 0xbf, 0x45, 0x90, 0x9c,
+	0x8c, 0x70, 0x3d, 0xd8, 0xbc, 0x23, 0x2f, 0x36, 0x62, 0xf3, 0x0e, 0x96, 0xee, 0x45, 0x7c, 0x1e,
+	0xbe, 0x79, 0xe7, 0xcf, 0xe3, 0xcd, 0xbb, 0xbd, 0xf5, 0xe4, 0x59, 0x6d, 0xe9, 0xc3, 0x67, 0xb5,
+	0xa5, 0xa7, 0xcf, 0x6a, 0x4b, 0x8f, 0x27, 0x35, 0xf4, 0x64, 0x52, 0x43, 0x1f, 0x4e, 0x6a, 0xe8,
+	0xe9, 0xa4, 0x86, 0xfe, 0x31, 0xa9, 0xa1, 0x0f, 0x9e, 0xd7, 0x96, 0xde, 0xcf, 0x8d, 0xb6, 0xff,
+	0x1b, 0x00, 0x00, 0xff, 0xff, 0x39, 0xa6, 0xdb, 0xa1, 0x02, 0x20, 0x00, 0x00,
+}
+
+func (m *Action) Marshal() (dAtA []byte, err error) {
+	size := m.Size()
+	dAtA = make([]byte, size)
+	n, err := m.MarshalToSizedBuffer(dAtA[:size])
+	if err != nil {
+		return nil, err
+	}
+	return dAtA[:n], nil
+}
+
+func (m *Action) MarshalTo(dAtA []byte) (int, error) {
+	size := m.Size()
+	return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *Action) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+	i := len(dAtA)
+	_ = i
+	var l int
+	_ = l
+	i--
+	if m.IsNonResourceURL {
+		dAtA[i] = 1
+	} else {
+		dAtA[i] = 0
+	}
+	i--
+	dAtA[i] = 0x48
+	i -= len(m.Path)
+	copy(dAtA[i:], m.Path)
+	i = encodeVarintGenerated(dAtA, i, uint64(len(m.Path)))
+	i--
+	dAtA[i] = 0x42
+	{
+		size, err := m.Content.MarshalToSizedBuffer(dAtA[:i])
+		if err != nil {
+			return 0, err
+		}
+		i -= size
+		i = encodeVarintGenerated(dAtA, i, uint64(size))
+	}
+	i--
+	dAtA[i] = 0x3a
+	i -= len(m.ResourceName)
+	copy(dAtA[i:], m.ResourceName)
+	i = encodeVarintGenerated(dAtA, i, uint64(len(m.ResourceName)))
+	i--
+	dAtA[i] = 0x32
+	i -= len(m.Resource)
+	copy(dAtA[i:], m.Resource)
+	i = encodeVarintGenerated(dAtA, i, uint64(len(m.Resource)))
+	i--
+	dAtA[i] = 0x2a
+	i -= len(m.Version)
+	copy(dAtA[i:], m.Version)
+	i = encodeVarintGenerated(dAtA, i, uint64(len(m.Version)))
+	i--
+	dAtA[i] = 0x22
+	i -= len(m.Group)
+	copy(dAtA[i:], m.Group)
+	i = encodeVarintGenerated(dAtA, i, uint64(len(m.Group)))
+	i--
+	dAtA[i] = 0x1a
+	i -= len(m.Verb)
+	copy(dAtA[i:], m.Verb)
+	i = encodeVarintGenerated(dAtA, i, uint64(len(m.Verb)))
+	i--
+	dAtA[i] = 0x12
+	i -= len(m.Namespace)
+	copy(dAtA[i:], m.Namespace)
+	i = encodeVarintGenerated(dAtA, i, uint64(len(m.Namespace)))
+	i--
+	dAtA[i] = 0xa
+	return len(dAtA) - i, nil
+}
+
+func (m *ClusterRole) Marshal() (dAtA []byte, err error) {
+	size := m.Size()
+	dAtA = make([]byte, size)
+	n, err := m.MarshalToSizedBuffer(dAtA[:size])
+	if err != nil {
+		return nil, err
+	}
+	return dAtA[:n], nil
+}
+
+func (m *ClusterRole) MarshalTo(dAtA []byte) (int, error) {
+	size := m.Size()
+	return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *ClusterRole) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+	i := len(dAtA)
+	_ = i
+	var l int
+	_ = l
+	if m.AggregationRule != nil {
+		{
+			size, err := m.AggregationRule.MarshalToSizedBuffer(dAtA[:i])
+			if err != nil {
+				return 0, err
+			}
+			i -= size
+			i = encodeVarintGenerated(dAtA, i, uint64(size))
+		}
+		i--
+		dAtA[i] = 0x1a
+	}
+	if len(m.Rules) > 0 {
+		for iNdEx := len(m.Rules) - 1; iNdEx >= 0; iNdEx-- {
+			{
+				size, err := m.Rules[iNdEx].MarshalToSizedBuffer(dAtA[:i])
+				if err != nil {
+					return 0, err
+				}
+				i -= size
+				i = encodeVarintGenerated(dAtA, i, uint64(size))
+			}
+			i--
+			dAtA[i] = 0x12
+		}
+	}
+	{
+		size, err := m.ObjectMeta.MarshalToSizedBuffer(dAtA[:i])
+		if err != nil {
+			return 0, err
+		}
+		i -= size
+		i = encodeVarintGenerated(dAtA, i, uint64(size))
+	}
+	i--
+	dAtA[i] = 0xa
+	return len(dAtA) - i, nil
+}
+
+func (m *ClusterRoleBinding) Marshal() (dAtA []byte, err error) {
+	size := m.Size()
+	dAtA = make([]byte, size)
+	n, err := m.MarshalToSizedBuffer(dAtA[:size])
+	if err != nil {
+		return nil, err
+	}
+	return dAtA[:n], nil
+}
+
+func (m *ClusterRoleBinding) MarshalTo(dAtA []byte) (int, error) {
+	size := m.Size()
+	return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *ClusterRoleBinding) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+	i := len(dAtA)
+	_ = i
+	var l int
+	_ = l
+	{
+		size, err := m.RoleRef.MarshalToSizedBuffer(dAtA[:i])
+		if err != nil {
+			return 0, err
+		}
+		i -= size
+		i = encodeVarintGenerated(dAtA, i, uint64(size))
+	}
+	i--
+	dAtA[i] = 0x2a
+	if len(m.Subjects) > 0 {
+		for iNdEx := len(m.Subjects) - 1; iNdEx >= 0; iNdEx-- {
+			{
+				size, err := m.Subjects[iNdEx].MarshalToSizedBuffer(dAtA[:i])
+				if err != nil {
+					return 0, err
+				}
+				i -= size
+				i = encodeVarintGenerated(dAtA, i, uint64(size))
+			}
+			i--
+			dAtA[i] = 0x22
+		}
+	}
+	if m.GroupNames != nil {
+		{
+			size, err := m.GroupNames.MarshalToSizedBuffer(dAtA[:i])
+			if err != nil {
+				return 0, err
+			}
+			i -= size
+			i = encodeVarintGenerated(dAtA, i, uint64(size))
+		}
+		i--
+		dAtA[i] = 0x1a
+	}
+	if m.UserNames != nil {
+		{
+			size, err := m.UserNames.MarshalToSizedBuffer(dAtA[:i])
+			if err != nil {
+				return 0, err
+			}
+			i -= size
+			i = encodeVarintGenerated(dAtA, i, uint64(size))
+		}
+		i--
+		dAtA[i] = 0x12
+	}
+	{
+		size, err := m.ObjectMeta.MarshalToSizedBuffer(dAtA[:i])
+		if err != nil {
+			return 0, err
+		}
+		i -= size
+		i = encodeVarintGenerated(dAtA, i, uint64(size))
+	}
+	i--
+	dAtA[i] = 0xa
+	return len(dAtA) - i, nil
+}
+
+func (m *ClusterRoleBindingList) Marshal() (dAtA []byte, err error) {
+	size := m.Size()
+	dAtA = make([]byte, size)
+	n, err := m.MarshalToSizedBuffer(dAtA[:size])
+	if err != nil {
+		return nil, err
+	}
+	return dAtA[:n], nil
+}
+
+func (m *ClusterRoleBindingList) MarshalTo(dAtA []byte) (int, error) {
+	size := m.Size()
+	return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *ClusterRoleBindingList) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+	i := len(dAtA)
+	_ = i
+	var l int
+	_ = l
+	if len(m.Items) > 0 {
+		for iNdEx := len(m.Items) - 1; iNdEx >= 0; iNdEx-- {
+			{
+				size, err := m.Items[iNdEx].MarshalToSizedBuffer(dAtA[:i])
+				if err != nil {
+					return 0, err
+				}
+				i -= size
+				i = encodeVarintGenerated(dAtA, i, uint64(size))
+			}
+			i--
+			dAtA[i] = 0x12
+		}
+	}
+	{
+		size, err := m.ListMeta.MarshalToSizedBuffer(dAtA[:i])
+		if err != nil {
+			return 0, err
+		}
+		i -= size
+		i = encodeVarintGenerated(dAtA, i, uint64(size))
+	}
+	i--
+	dAtA[i] = 0xa
+	return len(dAtA) - i, nil
+}
+
+func (m *ClusterRoleList) Marshal() (dAtA []byte, err error) {
+	size := m.Size()
+	dAtA = make([]byte, size)
+	n, err := m.MarshalToSizedBuffer(dAtA[:size])
+	if err != nil {
+		return nil, err
+	}
+	return dAtA[:n], nil
+}
+
+func (m *ClusterRoleList) MarshalTo(dAtA []byte) (int, error) {
+	size := m.Size()
+	return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *ClusterRoleList) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+	i := len(dAtA)
+	_ = i
+	var l int
+	_ = l
+	if len(m.Items) > 0 {
+		for iNdEx := len(m.Items) - 1; iNdEx >= 0; iNdEx-- {
+			{
+				size, err := m.Items[iNdEx].MarshalToSizedBuffer(dAtA[:i])
+				if err != nil {
+					return 0, err
+				}
+				i -= size
+				i = encodeVarintGenerated(dAtA, i, uint64(size))
+			}
+			i--
+			dAtA[i] = 0x12
+		}
+	}
+	{
+		size, err := m.ListMeta.MarshalToSizedBuffer(dAtA[:i])
+		if err != nil {
+			return 0, err
+		}
+		i -= size
+		i = encodeVarintGenerated(dAtA, i, uint64(size))
+	}
+	i--
+	dAtA[i] = 0xa
+	return len(dAtA) - i, nil
+}
+
+func (m *GroupRestriction) Marshal() (dAtA []byte, err error) {
+	size := m.Size()
+	dAtA = make([]byte, size)
+	n, err := m.MarshalToSizedBuffer(dAtA[:size])
+	if err != nil {
+		return nil, err
+	}
+	return dAtA[:n], nil
+}
+
+func (m *GroupRestriction) MarshalTo(dAtA []byte) (int, error) {
+	size := m.Size()
+	return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *GroupRestriction) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+	i := len(dAtA)
+	_ = i
+	var l int
+	_ = l
+	if len(m.Selectors) > 0 {
+		for iNdEx := len(m.Selectors) - 1; iNdEx >= 0; iNdEx-- {
+			{
+				size, err := m.Selectors[iNdEx].MarshalToSizedBuffer(dAtA[:i])
+				if err != nil {
+					return 0, err
+				}
+				i -= size
+				i = encodeVarintGenerated(dAtA, i, uint64(size))
+			}
+			i--
+			dAtA[i] = 0x12
+		}
+	}
+	if len(m.Groups) > 0 {
+		for iNdEx := len(m.Groups) - 1; iNdEx >= 0; iNdEx-- {
+			i -= len(m.Groups[iNdEx])
+			copy(dAtA[i:], m.Groups[iNdEx])
+			i = encodeVarintGenerated(dAtA, i, uint64(len(m.Groups[iNdEx])))
+			i--
+			dAtA[i] = 0xa
+		}
+	}
+	return len(dAtA) - i, nil
+}
+
+func (m *IsPersonalSubjectAccessReview) Marshal() (dAtA []byte, err error) {
+	size := m.Size()
+	dAtA = make([]byte, size)
+	n, err := m.MarshalToSizedBuffer(dAtA[:size])
+	if err != nil {
+		return nil, err
+	}
+	return dAtA[:n], nil
+}
+
+func (m *IsPersonalSubjectAccessReview) MarshalTo(dAtA []byte) (int, error) {
+	size := m.Size()
+	return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *IsPersonalSubjectAccessReview) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+	i := len(dAtA)
+	_ = i
+	var l int
+	_ = l
+	return len(dAtA) - i, nil
+}
+
+func (m *LocalResourceAccessReview) Marshal() (dAtA []byte, err error) {
+	size := m.Size()
+	dAtA = make([]byte, size)
+	n, err := m.MarshalToSizedBuffer(dAtA[:size])
+	if err != nil {
+		return nil, err
+	}
+	return dAtA[:n], nil
+}
+
+func (m *LocalResourceAccessReview) MarshalTo(dAtA []byte) (int, error) {
+	size := m.Size()
+	return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *LocalResourceAccessReview) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+	i := len(dAtA)
+	_ = i
+	var l int
+	_ = l
+	{
+		size, err := m.Action.MarshalToSizedBuffer(dAtA[:i])
+		if err != nil {
+			return 0, err
+		}
+		i -= size
+		i = encodeVarintGenerated(dAtA, i, uint64(size))
+	}
+	i--
+	dAtA[i] = 0xa
+	return len(dAtA) - i, nil
+}
+
+func (m *LocalSubjectAccessReview) Marshal() (dAtA []byte, err error) {
+	size := m.Size()
+	dAtA = make([]byte, size)
+	n, err := m.MarshalToSizedBuffer(dAtA[:size])
+	if err != nil {
+		return nil, err
+	}
+	return dAtA[:n], nil
+}
+
+func (m *LocalSubjectAccessReview) MarshalTo(dAtA []byte) (int, error) {
+	size := m.Size()
+	return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *LocalSubjectAccessReview) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+	i := len(dAtA)
+	_ = i
+	var l int
+	_ = l
+	if m.Scopes != nil {
+		{
+			size, err := m.Scopes.MarshalToSizedBuffer(dAtA[:i])
+			if err != nil {
+				return 0, err
+			}
+			i -= size
+			i = encodeVarintGenerated(dAtA, i, uint64(size))
+		}
+		i--
+		dAtA[i] = 0x22
+	}
+	if len(m.GroupsSlice) > 0 {
+		for iNdEx := len(m.GroupsSlice) - 1; iNdEx >= 0; iNdEx-- {
+			i -= len(m.GroupsSlice[iNdEx])
+			copy(dAtA[i:], m.GroupsSlice[iNdEx])
+			i = encodeVarintGenerated(dAtA, i, uint64(len(m.GroupsSlice[iNdEx])))
+			i--
+			dAtA[i] = 0x1a
+		}
+	}
+	i -= len(m.User)
+	copy(dAtA[i:], m.User)
+	i = encodeVarintGenerated(dAtA, i, uint64(len(m.User)))
+	i--
+	dAtA[i] = 0x12
+	{
+		size, err := m.Action.MarshalToSizedBuffer(dAtA[:i])
+		if err != nil {
+			return 0, err
+		}
+		i -= size
+		i = encodeVarintGenerated(dAtA, i, uint64(size))
+	}
+	i--
+	dAtA[i] = 0xa
+	return len(dAtA) - i, nil
+}
+
+func (m *NamedClusterRole) Marshal() (dAtA []byte, err error) {
+	size := m.Size()
+	dAtA = make([]byte, size)
+	n, err := m.MarshalToSizedBuffer(dAtA[:size])
+	if err != nil {
+		return nil, err
+	}
+	return dAtA[:n], nil
+}
+
+func (m *NamedClusterRole) MarshalTo(dAtA []byte) (int, error) {
+	size := m.Size()
+	return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *NamedClusterRole) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+	i := len(dAtA)
+	_ = i
+	var l int
+	_ = l
+	{
+		size, err := m.Role.MarshalToSizedBuffer(dAtA[:i])
+		if err != nil {
+			return 0, err
+		}
+		i -= size
+		i = encodeVarintGenerated(dAtA, i, uint64(size))
+	}
+	i--
+	dAtA[i] = 0x12
+	i -= len(m.Name)
+	copy(dAtA[i:], m.Name)
+	i = encodeVarintGenerated(dAtA, i, uint64(len(m.Name)))
+	i--
+	dAtA[i] = 0xa
+	return len(dAtA) - i, nil
+}
+
+func (m *NamedClusterRoleBinding) Marshal() (dAtA []byte, err error) {
+	size := m.Size()
+	dAtA = make([]byte, size)
+	n, err := m.MarshalToSizedBuffer(dAtA[:size])
+	if err != nil {
+		return nil, err
+	}
+	return dAtA[:n], nil
+}
+
+func (m *NamedClusterRoleBinding) MarshalTo(dAtA []byte) (int, error) {
+	size := m.Size()
+	return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *NamedClusterRoleBinding) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+	i := len(dAtA)
+	_ = i
+	var l int
+	_ = l
+	{
+		size, err := m.RoleBinding.MarshalToSizedBuffer(dAtA[:i])
+		if err != nil {
+			return 0, err
+		}
+		i -= size
+		i = encodeVarintGenerated(dAtA, i, uint64(size))
+	}
+	i--
+	dAtA[i] = 0x12
+	i -= len(m.Name)
+	copy(dAtA[i:], m.Name)
+	i = encodeVarintGenerated(dAtA, i, uint64(len(m.Name)))
+	i--
+	dAtA[i] = 0xa
+	return len(dAtA) - i, nil
+}
+
+func (m *NamedRole) Marshal() (dAtA []byte, err error) {
+	size := m.Size()
+	dAtA = make([]byte, size)
+	n, err := m.MarshalToSizedBuffer(dAtA[:size])
+	if err != nil {
+		return nil, err
+	}
+	return dAtA[:n], nil
+}
+
+func (m *NamedRole) MarshalTo(dAtA []byte) (int, error) {
+	size := m.Size()
+	return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *NamedRole) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+	i := len(dAtA)
+	_ = i
+	var l int
+	_ = l
+	{
+		size, err := m.Role.MarshalToSizedBuffer(dAtA[:i])
+		if err != nil {
+			return 0, err
+		}
+		i -= size
+		i = encodeVarintGenerated(dAtA, i, uint64(size))
+	}
+	i--
+	dAtA[i] = 0x12
+	i -= len(m.Name)
+	copy(dAtA[i:], m.Name)
+	i = encodeVarintGenerated(dAtA, i, uint64(len(m.Name)))
+	i--
+	dAtA[i] = 0xa
+	return len(dAtA) - i, nil
+}
+
+func (m *NamedRoleBinding) Marshal() (dAtA []byte, err error) {
+	size := m.Size()
+	dAtA = make([]byte, size)
+	n, err := m.MarshalToSizedBuffer(dAtA[:size])
+	if err != nil {
+		return nil, err
+	}
+	return dAtA[:n], nil
+}
+
+func (m *NamedRoleBinding) MarshalTo(dAtA []byte) (int, error) {
+	size := m.Size()
+	return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *NamedRoleBinding) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+	i := len(dAtA)
+	_ = i
+	var l int
+	_ = l
+	{
+		size, err := m.RoleBinding.MarshalToSizedBuffer(dAtA[:i])
+		if err != nil {
+			return 0, err
+		}
+		i -= size
+		i = encodeVarintGenerated(dAtA, i, uint64(size))
+	}
+	i--
+	dAtA[i] = 0x12
+	i -= len(m.Name)
+	copy(dAtA[i:], m.Name)
+	i = encodeVarintGenerated(dAtA, i, uint64(len(m.Name)))
+	i--
+	dAtA[i] = 0xa
+	return len(dAtA) - i, nil
+}
+
+func (m OptionalNames) Marshal() (dAtA []byte, err error) {
+	size := m.Size()
+	dAtA = make([]byte, size)
+	n, err := m.MarshalToSizedBuffer(dAtA[:size])
+	if err != nil {
+		return nil, err
+	}
+	return dAtA[:n], nil
+}
+
+func (m OptionalNames) MarshalTo(dAtA []byte) (int, error) {
+	size := m.Size()
+	return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m OptionalNames) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+	i := len(dAtA)
+	_ = i
+	var l int
+	_ = l
+	if len(m) > 0 {
+		for iNdEx := len(m) - 1; iNdEx >= 0; iNdEx-- {
+			i -= len(m[iNdEx])
+			copy(dAtA[i:], m[iNdEx])
+			i = encodeVarintGenerated(dAtA, i, uint64(len(m[iNdEx])))
+			i--
+			dAtA[i] = 0xa
+		}
+	}
+	return len(dAtA) - i, nil
+}
+
+func (m OptionalScopes) Marshal() (dAtA []byte, err error) {
+	size := m.Size()
+	dAtA = make([]byte, size)
+	n, err := m.MarshalToSizedBuffer(dAtA[:size])
+	if err != nil {
+		return nil, err
+	}
+	return dAtA[:n], nil
+}
+
+func (m OptionalScopes) MarshalTo(dAtA []byte) (int, error) {
+	size := m.Size()
+	return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m OptionalScopes) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+	i := len(dAtA)
+	_ = i
+	var l int
+	_ = l
+	if len(m) > 0 {
+		for iNdEx := len(m) - 1; iNdEx >= 0; iNdEx-- {
+			i -= len(m[iNdEx])
+			copy(dAtA[i:], m[iNdEx])
+			i = encodeVarintGenerated(dAtA, i, uint64(len(m[iNdEx])))
+			i--
+			dAtA[i] = 0xa
+		}
+	}
+	return len(dAtA) - i, nil
+}
+
+func (m *PolicyRule) Marshal() (dAtA []byte, err error) {
+	size := m.Size()
+	dAtA = make([]byte, size)
+	n, err := m.MarshalToSizedBuffer(dAtA[:size])
+	if err != nil {
+		return nil, err
+	}
+	return dAtA[:n], nil
+}
+
+func (m *PolicyRule) MarshalTo(dAtA []byte) (int, error) {
+	size := m.Size()
+	return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *PolicyRule) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+	i := len(dAtA)
+	_ = i
+	var l int
+	_ = l
+	if len(m.NonResourceURLsSlice) > 0 {
+		for iNdEx := len(m.NonResourceURLsSlice) - 1; iNdEx >= 0; iNdEx-- {
+			i -= len(m.NonResourceURLsSlice[iNdEx])
+			copy(dAtA[i:], m.NonResourceURLsSlice[iNdEx])
+			i = encodeVarintGenerated(dAtA, i, uint64(len(m.NonResourceURLsSlice[iNdEx])))
+			i--
+			dAtA[i] = 0x32
+		}
+	}
+	if len(m.ResourceNames) > 0 {
+		for iNdEx := len(m.ResourceNames) - 1; iNdEx >= 0; iNdEx-- {
+			i -= len(m.ResourceNames[iNdEx])
+			copy(dAtA[i:], m.ResourceNames[iNdEx])
+			i = encodeVarintGenerated(dAtA, i, uint64(len(m.ResourceNames[iNdEx])))
+			i--
+			dAtA[i] = 0x2a
+		}
+	}
+	if len(m.Resources) > 0 {
+		for iNdEx := len(m.Resources) - 1; iNdEx >= 0; iNdEx-- {
+			i -= len(m.Resources[iNdEx])
+			copy(dAtA[i:], m.Resources[iNdEx])
+			i = encodeVarintGenerated(dAtA, i, uint64(len(m.Resources[iNdEx])))
+			i--
+			dAtA[i] = 0x22
+		}
+	}
+	if len(m.APIGroups) > 0 {
+		for iNdEx := len(m.APIGroups) - 1; iNdEx >= 0; iNdEx-- {
+			i -= len(m.APIGroups[iNdEx])
+			copy(dAtA[i:], m.APIGroups[iNdEx])
+			i = encodeVarintGenerated(dAtA, i, uint64(len(m.APIGroups[iNdEx])))
+			i--
+			dAtA[i] = 0x1a
+		}
+	}
+	{
+		size, err := m.AttributeRestrictions.MarshalToSizedBuffer(dAtA[:i])
+		if err != nil {
+			return 0, err
+		}
+		i -= size
+		i = encodeVarintGenerated(dAtA, i, uint64(size))
+	}
+	i--
+	dAtA[i] = 0x12
+	if len(m.Verbs) > 0 {
+		for iNdEx := len(m.Verbs) - 1; iNdEx >= 0; iNdEx-- {
+			i -= len(m.Verbs[iNdEx])
+			copy(dAtA[i:], m.Verbs[iNdEx])
+			i = encodeVarintGenerated(dAtA, i, uint64(len(m.Verbs[iNdEx])))
+			i--
+			dAtA[i] = 0xa
+		}
+	}
+	return len(dAtA) - i, nil
+}
+
+func (m *ResourceAccessReview) Marshal() (dAtA []byte, err error) {
+	size := m.Size()
+	dAtA = make([]byte, size)
+	n, err := m.MarshalToSizedBuffer(dAtA[:size])
+	if err != nil {
+		return nil, err
+	}
+	return dAtA[:n], nil
+}
+
+func (m *ResourceAccessReview) MarshalTo(dAtA []byte) (int, error) {
+	size := m.Size()
+	return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *ResourceAccessReview) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+	i := len(dAtA)
+	_ = i
+	var l int
+	_ = l
+	{
+		size, err := m.Action.MarshalToSizedBuffer(dAtA[:i])
+		if err != nil {
+			return 0, err
+		}
+		i -= size
+		i = encodeVarintGenerated(dAtA, i, uint64(size))
+	}
+	i--
+	dAtA[i] = 0xa
+	return len(dAtA) - i, nil
+}
+
+func (m *ResourceAccessReviewResponse) Marshal() (dAtA []byte, err error) {
+	size := m.Size()
+	dAtA = make([]byte, size)
+	n, err := m.MarshalToSizedBuffer(dAtA[:size])
+	if err != nil {
+		return nil, err
+	}
+	return dAtA[:n], nil
+}
+
+func (m *ResourceAccessReviewResponse) MarshalTo(dAtA []byte) (int, error) {
+	size := m.Size()
+	return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *ResourceAccessReviewResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+	i := len(dAtA)
+	_ = i
+	var l int
+	_ = l
+	i -= len(m.EvaluationError)
+	copy(dAtA[i:], m.EvaluationError)
+	i = encodeVarintGenerated(dAtA, i, uint64(len(m.EvaluationError)))
+	i--
+	dAtA[i] = 0x22
+	if len(m.GroupsSlice) > 0 {
+		for iNdEx := len(m.GroupsSlice) - 1; iNdEx >= 0; iNdEx-- {
+			i -= len(m.GroupsSlice[iNdEx])
+			copy(dAtA[i:], m.GroupsSlice[iNdEx])
+			i = encodeVarintGenerated(dAtA, i, uint64(len(m.GroupsSlice[iNdEx])))
+			i--
+			dAtA[i] = 0x1a
+		}
+	}
+	if len(m.UsersSlice) > 0 {
+		for iNdEx := len(m.UsersSlice) - 1; iNdEx >= 0; iNdEx-- {
+			i -= len(m.UsersSlice[iNdEx])
+			copy(dAtA[i:], m.UsersSlice[iNdEx])
+			i = encodeVarintGenerated(dAtA, i, uint64(len(m.UsersSlice[iNdEx])))
+			i--
+			dAtA[i] = 0x12
+		}
+	}
+	i -= len(m.Namespace)
+	copy(dAtA[i:], m.Namespace)
+	i = encodeVarintGenerated(dAtA, i, uint64(len(m.Namespace)))
+	i--
+	dAtA[i] = 0xa
+	return len(dAtA) - i, nil
+}
+
+func (m *Role) Marshal() (dAtA []byte, err error) {
+	size := m.Size()
+	dAtA = make([]byte, size)
+	n, err := m.MarshalToSizedBuffer(dAtA[:size])
+	if err != nil {
+		return nil, err
+	}
+	return dAtA[:n], nil
+}
+
+func (m *Role) MarshalTo(dAtA []byte) (int, error) {
+	size := m.Size()
+	return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *Role) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+	i := len(dAtA)
+	_ = i
+	var l int
+	_ = l
+	if len(m.Rules) > 0 {
+		for iNdEx := len(m.Rules) - 1; iNdEx >= 0; iNdEx-- {
+			{
+				size, err := m.Rules[iNdEx].MarshalToSizedBuffer(dAtA[:i])
+				if err != nil {
+					return 0, err
+				}
+				i -= size
+				i = encodeVarintGenerated(dAtA, i, uint64(size))
+			}
+			i--
+			dAtA[i] = 0x12
+		}
+	}
+	{
+		size, err := m.ObjectMeta.MarshalToSizedBuffer(dAtA[:i])
+		if err != nil {
+			return 0, err
+		}
+		i -= size
+		i = encodeVarintGenerated(dAtA, i, uint64(size))
+	}
+	i--
+	dAtA[i] = 0xa
+	return len(dAtA) - i, nil
+}
+
+func (m *RoleBinding) Marshal() (dAtA []byte, err error) {
+	size := m.Size()
+	dAtA = make([]byte, size)
+	n, err := m.MarshalToSizedBuffer(dAtA[:size])
+	if err != nil {
+		return nil, err
+	}
+	return dAtA[:n], nil
+}
+
+func (m *RoleBinding) MarshalTo(dAtA []byte) (int, error) {
+	size := m.Size()
+	return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *RoleBinding) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+	i := len(dAtA)
+	_ = i
+	var l int
+	_ = l
+	{
+		size, err := m.RoleRef.MarshalToSizedBuffer(dAtA[:i])
+		if err != nil {
+			return 0, err
+		}
+		i -= size
+		i = encodeVarintGenerated(dAtA, i, uint64(size))
+	}
+	i--
+	dAtA[i] = 0x2a
+	if len(m.Subjects) > 0 {
+		for iNdEx := len(m.Subjects) - 1; iNdEx >= 0; iNdEx-- {
+			{
+				size, err := m.Subjects[iNdEx].MarshalToSizedBuffer(dAtA[:i])
+				if err != nil {
+					return 0, err
+				}
+				i -= size
+				i = encodeVarintGenerated(dAtA, i, uint64(size))
+			}
+			i--
+			dAtA[i] = 0x22
+		}
+	}
+	if m.GroupNames != nil {
+		{
+			size, err := m.GroupNames.MarshalToSizedBuffer(dAtA[:i])
+			if err != nil {
+				return 0, err
+			}
+			i -= size
+			i = encodeVarintGenerated(dAtA, i, uint64(size))
+		}
+		i--
+		dAtA[i] = 0x1a
+	}
+	if m.UserNames != nil {
+		{
+			size, err := m.UserNames.MarshalToSizedBuffer(dAtA[:i])
+			if err != nil {
+				return 0, err
+			}
+			i -= size
+			i = encodeVarintGenerated(dAtA, i, uint64(size))
+		}
+		i--
+		dAtA[i] = 0x12
+	}
+	{
+		size, err := m.ObjectMeta.MarshalToSizedBuffer(dAtA[:i])
+		if err != nil {
+			return 0, err
+		}
+		i -= size
+		i = encodeVarintGenerated(dAtA, i, uint64(size))
+	}
+	i--
+	dAtA[i] = 0xa
+	return len(dAtA) - i, nil
+}
+
+func (m *RoleBindingList) Marshal() (dAtA []byte, err error) {
+	size := m.Size()
+	dAtA = make([]byte, size)
+	n, err := m.MarshalToSizedBuffer(dAtA[:size])
+	if err != nil {
+		return nil, err
+	}
+	return dAtA[:n], nil
+}
+
+func (m *RoleBindingList) MarshalTo(dAtA []byte) (int, error) {
+	size := m.Size()
+	return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *RoleBindingList) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+	i := len(dAtA)
+	_ = i
+	var l int
+	_ = l
+	if len(m.Items) > 0 {
+		for iNdEx := len(m.Items) - 1; iNdEx >= 0; iNdEx-- {
+			{
+				size, err := m.Items[iNdEx].MarshalToSizedBuffer(dAtA[:i])
+				if err != nil {
+					return 0, err
+				}
+				i -= size
+				i = encodeVarintGenerated(dAtA, i, uint64(size))
+			}
+			i--
+			dAtA[i] = 0x12
+		}
+	}
+	{
+		size, err := m.ListMeta.MarshalToSizedBuffer(dAtA[:i])
+		if err != nil {
+			return 0, err
+		}
+		i -= size
+		i = encodeVarintGenerated(dAtA, i, uint64(size))
+	}
+	i--
+	dAtA[i] = 0xa
+	return len(dAtA) - i, nil
+}
+
+func (m *RoleBindingRestriction) Marshal() (dAtA []byte, err error) {
+	size := m.Size()
+	dAtA = make([]byte, size)
+	n, err := m.MarshalToSizedBuffer(dAtA[:size])
+	if err != nil {
+		return nil, err
+	}
+	return dAtA[:n], nil
+}
+
+func (m *RoleBindingRestriction) MarshalTo(dAtA []byte) (int, error) {
+	size := m.Size()
+	return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *RoleBindingRestriction) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+	i := len(dAtA)
+	_ = i
+	var l int
+	_ = l
+	{
+		size, err := m.Spec.MarshalToSizedBuffer(dAtA[:i])
+		if err != nil {
+			return 0, err
+		}
+		i -= size
+		i = encodeVarintGenerated(dAtA, i, uint64(size))
+	}
+	i--
+	dAtA[i] = 0x12
+	{
+		size, err := m.ObjectMeta.MarshalToSizedBuffer(dAtA[:i])
+		if err != nil {
+			return 0, err
+		}
+		i -= size
+		i = encodeVarintGenerated(dAtA, i, uint64(size))
+	}
+	i--
+	dAtA[i] = 0xa
+	return len(dAtA) - i, nil
+}
+
+func (m *RoleBindingRestrictionList) Marshal() (dAtA []byte, err error) {
+	size := m.Size()
+	dAtA = make([]byte, size)
+	n, err := m.MarshalToSizedBuffer(dAtA[:size])
+	if err != nil {
+		return nil, err
+	}
+	return dAtA[:n], nil
+}
+
+func (m *RoleBindingRestrictionList) MarshalTo(dAtA []byte) (int, error) {
+	size := m.Size()
+	return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *RoleBindingRestrictionList) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+	i := len(dAtA)
+	_ = i
+	var l int
+	_ = l
+	if len(m.Items) > 0 {
+		for iNdEx := len(m.Items) - 1; iNdEx >= 0; iNdEx-- {
+			{
+				size, err := m.Items[iNdEx].MarshalToSizedBuffer(dAtA[:i])
+				if err != nil {
+					return 0, err
+				}
+				i -= size
+				i = encodeVarintGenerated(dAtA, i, uint64(size))
+			}
+			i--
+			dAtA[i] = 0x12
+		}
+	}
+	{
+		size, err := m.ListMeta.MarshalToSizedBuffer(dAtA[:i])
+		if err != nil {
+			return 0, err
+		}
+		i -= size
+		i = encodeVarintGenerated(dAtA, i, uint64(size))
+	}
+	i--
+	dAtA[i] = 0xa
+	return len(dAtA) - i, nil
+}
+
+func (m *RoleBindingRestrictionSpec) Marshal() (dAtA []byte, err error) {
+	size := m.Size()
+	dAtA = make([]byte, size)
+	n, err := m.MarshalToSizedBuffer(dAtA[:size])
+	if err != nil {
+		return nil, err
+	}
+	return dAtA[:n], nil
+}
+
+func (m *RoleBindingRestrictionSpec) MarshalTo(dAtA []byte) (int, error) {
+	size := m.Size()
+	return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *RoleBindingRestrictionSpec) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+	i := len(dAtA)
+	_ = i
+	var l int
+	_ = l
+	if m.ServiceAccountRestriction != nil {
+		{
+			size, err := m.ServiceAccountRestriction.MarshalToSizedBuffer(dAtA[:i])
+			if err != nil {
+				return 0, err
+			}
+			i -= size
+			i = encodeVarintGenerated(dAtA, i, uint64(size))
+		}
+		i--
+		dAtA[i] = 0x1a
+	}
+	if m.GroupRestriction != nil {
+		{
+			size, err := m.GroupRestriction.MarshalToSizedBuffer(dAtA[:i])
+			if err != nil {
+				return 0, err
+			}
+			i -= size
+			i = encodeVarintGenerated(dAtA, i, uint64(size))
+		}
+		i--
+		dAtA[i] = 0x12
+	}
+	if m.UserRestriction != nil {
+		{
+			size, err := m.UserRestriction.MarshalToSizedBuffer(dAtA[:i])
+			if err != nil {
+				return 0, err
+			}
+			i -= size
+			i = encodeVarintGenerated(dAtA, i, uint64(size))
+		}
+		i--
+		dAtA[i] = 0xa
+	}
+	return len(dAtA) - i, nil
+}
+
+func (m *RoleList) Marshal() (dAtA []byte, err error) {
+	size := m.Size()
+	dAtA = make([]byte, size)
+	n, err := m.MarshalToSizedBuffer(dAtA[:size])
+	if err != nil {
+		return nil, err
+	}
+	return dAtA[:n], nil
+}
+
+func (m *RoleList) MarshalTo(dAtA []byte) (int, error) {
+	size := m.Size()
+	return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *RoleList) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+	i := len(dAtA)
+	_ = i
+	var l int
+	_ = l
+	if len(m.Items) > 0 {
+		for iNdEx := len(m.Items) - 1; iNdEx >= 0; iNdEx-- {
+			{
+				size, err := m.Items[iNdEx].MarshalToSizedBuffer(dAtA[:i])
+				if err != nil {
+					return 0, err
+				}
+				i -= size
+				i = encodeVarintGenerated(dAtA, i, uint64(size))
+			}
+			i--
+			dAtA[i] = 0x12
+		}
+	}
+	{
+		size, err := m.ListMeta.MarshalToSizedBuffer(dAtA[:i])
+		if err != nil {
+			return 0, err
+		}
+		i -= size
+		i = encodeVarintGenerated(dAtA, i, uint64(size))
+	}
+	i--
+	dAtA[i] = 0xa
+	return len(dAtA) - i, nil
+}
+
+func (m *SelfSubjectRulesReview) Marshal() (dAtA []byte, err error) {
+	size := m.Size()
+	dAtA = make([]byte, size)
+	n, err := m.MarshalToSizedBuffer(dAtA[:size])
+	if err != nil {
+		return nil, err
+	}
+	return dAtA[:n], nil
+}
+
+func (m *SelfSubjectRulesReview) MarshalTo(dAtA []byte) (int, error) {
+	size := m.Size()
+	return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *SelfSubjectRulesReview) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+	i := len(dAtA)
+	_ = i
+	var l int
+	_ = l
+	{
+		size, err := m.Status.MarshalToSizedBuffer(dAtA[:i])
+		if err != nil {
+			return 0, err
+		}
+		i -= size
+		i = encodeVarintGenerated(dAtA, i, uint64(size))
+	}
+	i--
+	dAtA[i] = 0x12
+	{
+		size, err := m.Spec.MarshalToSizedBuffer(dAtA[:i])
+		if err != nil {
+			return 0, err
+		}
+		i -= size
+		i = encodeVarintGenerated(dAtA, i, uint64(size))
+	}
+	i--
+	dAtA[i] = 0xa
+	return len(dAtA) - i, nil
+}
+
+func (m *SelfSubjectRulesReviewSpec) Marshal() (dAtA []byte, err error) {
+	size := m.Size()
+	dAtA = make([]byte, size)
+	n, err := m.MarshalToSizedBuffer(dAtA[:size])
+	if err != nil {
+		return nil, err
+	}
+	return dAtA[:n], nil
+}
+
+func (m *SelfSubjectRulesReviewSpec) MarshalTo(dAtA []byte) (int, error) {
+	size := m.Size()
+	return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *SelfSubjectRulesReviewSpec) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+	i := len(dAtA)
+	_ = i
+	var l int
+	_ = l
+	if m.Scopes != nil {
+		{
+			size, err := m.Scopes.MarshalToSizedBuffer(dAtA[:i])
+			if err != nil {
+				return 0, err
+			}
+			i -= size
+			i = encodeVarintGenerated(dAtA, i, uint64(size))
+		}
+		i--
+		dAtA[i] = 0xa
+	}
+	return len(dAtA) - i, nil
+}
+
+func (m *ServiceAccountReference) Marshal() (dAtA []byte, err error) {
+	size := m.Size()
+	dAtA = make([]byte, size)
+	n, err := m.MarshalToSizedBuffer(dAtA[:size])
+	if err != nil {
+		return nil, err
+	}
+	return dAtA[:n], nil
+}
+
+func (m *ServiceAccountReference) MarshalTo(dAtA []byte) (int, error) {
+	size := m.Size()
+	return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *ServiceAccountReference) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+	i := len(dAtA)
+	_ = i
+	var l int
+	_ = l
+	i -= len(m.Namespace)
+	copy(dAtA[i:], m.Namespace)
+	i = encodeVarintGenerated(dAtA, i, uint64(len(m.Namespace)))
+	i--
+	dAtA[i] = 0x12
+	i -= len(m.Name)
+	copy(dAtA[i:], m.Name)
+	i = encodeVarintGenerated(dAtA, i, uint64(len(m.Name)))
+	i--
+	dAtA[i] = 0xa
+	return len(dAtA) - i, nil
+}
+
+func (m *ServiceAccountRestriction) Marshal() (dAtA []byte, err error) {
+	size := m.Size()
+	dAtA = make([]byte, size)
+	n, err := m.MarshalToSizedBuffer(dAtA[:size])
+	if err != nil {
+		return nil, err
+	}
+	return dAtA[:n], nil
+}
+
+func (m *ServiceAccountRestriction) MarshalTo(dAtA []byte) (int, error) {
+	size := m.Size()
+	return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *ServiceAccountRestriction) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+	i := len(dAtA)
+	_ = i
+	var l int
+	_ = l
+	if len(m.Namespaces) > 0 {
+		for iNdEx := len(m.Namespaces) - 1; iNdEx >= 0; iNdEx-- {
+			i -= len(m.Namespaces[iNdEx])
+			copy(dAtA[i:], m.Namespaces[iNdEx])
+			i = encodeVarintGenerated(dAtA, i, uint64(len(m.Namespaces[iNdEx])))
+			i--
+			dAtA[i] = 0x12
+		}
+	}
+	if len(m.ServiceAccounts) > 0 {
+		for iNdEx := len(m.ServiceAccounts) - 1; iNdEx >= 0; iNdEx-- {
+			{
+				size, err := m.ServiceAccounts[iNdEx].MarshalToSizedBuffer(dAtA[:i])
+				if err != nil {
+					return 0, err
+				}
+				i -= size
+				i = encodeVarintGenerated(dAtA, i, uint64(size))
+			}
+			i--
+			dAtA[i] = 0xa
+		}
+	}
+	return len(dAtA) - i, nil
+}
+
+func (m *SubjectAccessReview) Marshal() (dAtA []byte, err error) {
+	size := m.Size()
+	dAtA = make([]byte, size)
+	n, err := m.MarshalToSizedBuffer(dAtA[:size])
+	if err != nil {
+		return nil, err
+	}
+	return dAtA[:n], nil
+}
+
+func (m *SubjectAccessReview) MarshalTo(dAtA []byte) (int, error) {
+	size := m.Size()
+	return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *SubjectAccessReview) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+	i := len(dAtA)
+	_ = i
+	var l int
+	_ = l
+	if m.Scopes != nil {
+		{
+			size, err := m.Scopes.MarshalToSizedBuffer(dAtA[:i])
+			if err != nil {
+				return 0, err
+			}
+			i -= size
+			i = encodeVarintGenerated(dAtA, i, uint64(size))
+		}
+		i--
+		dAtA[i] = 0x22
+	}
+	if len(m.GroupsSlice) > 0 {
+		for iNdEx := len(m.GroupsSlice) - 1; iNdEx >= 0; iNdEx-- {
+			i -= len(m.GroupsSlice[iNdEx])
+			copy(dAtA[i:], m.GroupsSlice[iNdEx])
+			i = encodeVarintGenerated(dAtA, i, uint64(len(m.GroupsSlice[iNdEx])))
+			i--
+			dAtA[i] = 0x1a
+		}
+	}
+	i -= len(m.User)
+	copy(dAtA[i:], m.User)
+	i = encodeVarintGenerated(dAtA, i, uint64(len(m.User)))
+	i--
+	dAtA[i] = 0x12
+	{
+		size, err := m.Action.MarshalToSizedBuffer(dAtA[:i])
+		if err != nil {
+			return 0, err
+		}
+		i -= size
+		i = encodeVarintGenerated(dAtA, i, uint64(size))
+	}
+	i--
+	dAtA[i] = 0xa
+	return len(dAtA) - i, nil
+}
+
+func (m *SubjectAccessReviewResponse) Marshal() (dAtA []byte, err error) {
+	size := m.Size()
+	dAtA = make([]byte, size)
+	n, err := m.MarshalToSizedBuffer(dAtA[:size])
+	if err != nil {
+		return nil, err
+	}
+	return dAtA[:n], nil
+}
+
+func (m *SubjectAccessReviewResponse) MarshalTo(dAtA []byte) (int, error) {
+	size := m.Size()
+	return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *SubjectAccessReviewResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+	i := len(dAtA)
+	_ = i
+	var l int
+	_ = l
+	i -= len(m.EvaluationError)
+	copy(dAtA[i:], m.EvaluationError)
+	i = encodeVarintGenerated(dAtA, i, uint64(len(m.EvaluationError)))
+	i--
+	dAtA[i] = 0x22
+	i -= len(m.Reason)
+	copy(dAtA[i:], m.Reason)
+	i = encodeVarintGenerated(dAtA, i, uint64(len(m.Reason)))
+	i--
+	dAtA[i] = 0x1a
+	i--
+	if m.Allowed {
+		dAtA[i] = 1
+	} else {
+		dAtA[i] = 0
+	}
+	i--
+	dAtA[i] = 0x10
+	i -= len(m.Namespace)
+	copy(dAtA[i:], m.Namespace)
+	i = encodeVarintGenerated(dAtA, i, uint64(len(m.Namespace)))
+	i--
+	dAtA[i] = 0xa
+	return len(dAtA) - i, nil
+}
+
+func (m *SubjectRulesReview) Marshal() (dAtA []byte, err error) {
+	size := m.Size()
+	dAtA = make([]byte, size)
+	n, err := m.MarshalToSizedBuffer(dAtA[:size])
+	if err != nil {
+		return nil, err
+	}
+	return dAtA[:n], nil
+}
+
+func (m *SubjectRulesReview) MarshalTo(dAtA []byte) (int, error) {
+	size := m.Size()
+	return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *SubjectRulesReview) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+	i := len(dAtA)
+	_ = i
+	var l int
+	_ = l
+	{
+		size, err := m.Status.MarshalToSizedBuffer(dAtA[:i])
+		if err != nil {
+			return 0, err
+		}
+		i -= size
+		i = encodeVarintGenerated(dAtA, i, uint64(size))
+	}
+	i--
+	dAtA[i] = 0x12
+	{
+		size, err := m.Spec.MarshalToSizedBuffer(dAtA[:i])
+		if err != nil {
+			return 0, err
+		}
+		i -= size
+		i = encodeVarintGenerated(dAtA, i, uint64(size))
+	}
+	i--
+	dAtA[i] = 0xa
+	return len(dAtA) - i, nil
+}
+
+func (m *SubjectRulesReviewSpec) Marshal() (dAtA []byte, err error) {
+	size := m.Size()
+	dAtA = make([]byte, size)
+	n, err := m.MarshalToSizedBuffer(dAtA[:size])
+	if err != nil {
+		return nil, err
+	}
+	return dAtA[:n], nil
+}
+
+func (m *SubjectRulesReviewSpec) MarshalTo(dAtA []byte) (int, error) {
+	size := m.Size()
+	return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *SubjectRulesReviewSpec) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+	i := len(dAtA)
+	_ = i
+	var l int
+	_ = l
+	if m.Scopes != nil {
+		{
+			size, err := m.Scopes.MarshalToSizedBuffer(dAtA[:i])
+			if err != nil {
+				return 0, err
+			}
+			i -= size
+			i = encodeVarintGenerated(dAtA, i, uint64(size))
+		}
+		i--
+		dAtA[i] = 0x1a
+	}
+	if len(m.Groups) > 0 {
+		for iNdEx := len(m.Groups) - 1; iNdEx >= 0; iNdEx-- {
+			i -= len(m.Groups[iNdEx])
+			copy(dAtA[i:], m.Groups[iNdEx])
+			i = encodeVarintGenerated(dAtA, i, uint64(len(m.Groups[iNdEx])))
+			i--
+			dAtA[i] = 0x12
+		}
+	}
+	i -= len(m.User)
+	copy(dAtA[i:], m.User)
+	i = encodeVarintGenerated(dAtA, i, uint64(len(m.User)))
+	i--
+	dAtA[i] = 0xa
+	return len(dAtA) - i, nil
+}
+
+func (m *SubjectRulesReviewStatus)
Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *SubjectRulesReviewStatus) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *SubjectRulesReviewStatus) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + i -= len(m.EvaluationError) + copy(dAtA[i:], m.EvaluationError) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.EvaluationError))) + i-- + dAtA[i] = 0x12 + if len(m.Rules) > 0 { + for iNdEx := len(m.Rules) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Rules[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + } + } + return len(dAtA) - i, nil +} + +func (m *UserRestriction) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *UserRestriction) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *UserRestriction) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.Selectors) > 0 { + for iNdEx := len(m.Selectors) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Selectors[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a + } + } + if len(m.Groups) > 0 { + for iNdEx := len(m.Groups) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.Groups[iNdEx]) + copy(dAtA[i:], m.Groups[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Groups[iNdEx]))) + i-- + dAtA[i] = 0x12 + } + } + if len(m.Users) > 0 { + for iNdEx := len(m.Users) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.Users[iNdEx]) + copy(dAtA[i:], m.Users[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Users[iNdEx]))) + i-- + dAtA[i] = 0xa + } + } + return len(dAtA) - i, nil +} + +func encodeVarintGenerated(dAtA []byte, offset int, v uint64) int { + offset -= sovGenerated(v) + base := offset + for v >= 1<<7 { + dAtA[offset] = uint8(v&0x7f | 0x80) + v >>= 7 + offset++ + } + dAtA[offset] = uint8(v) + return base +} +func (m *Action) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Namespace) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Verb) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Group) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Version) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Resource) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.ResourceName) + n += 1 + l + sovGenerated(uint64(l)) + l = m.Content.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Path) + n += 1 + l + sovGenerated(uint64(l)) + n += 2 + return n +} + +func (m *ClusterRole) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.ObjectMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + if len(m.Rules) > 0 { + for _, e := range m.Rules { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + if m.AggregationRule != nil { + l = m.AggregationRule.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + return n +} + +func (m *ClusterRoleBinding) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = 
m.ObjectMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + if m.UserNames != nil { + l = m.UserNames.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if m.GroupNames != nil { + l = m.GroupNames.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if len(m.Subjects) > 0 { + for _, e := range m.Subjects { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + l = m.RoleRef.Size() + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *ClusterRoleBindingList) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.ListMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + if len(m.Items) > 0 { + for _, e := range m.Items { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m *ClusterRoleList) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.ListMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + if len(m.Items) > 0 { + for _, e := range m.Items { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m *GroupRestriction) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.Groups) > 0 { + for _, s := range m.Groups { + l = len(s) + n += 1 + l + sovGenerated(uint64(l)) + } + } + if len(m.Selectors) > 0 { + for _, e := range m.Selectors { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m *IsPersonalSubjectAccessReview) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + return n +} + +func (m *LocalResourceAccessReview) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.Action.Size() + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *LocalSubjectAccessReview) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.Action.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.User) + n += 1 + l + sovGenerated(uint64(l)) + if len(m.GroupsSlice) > 0 { + for _, s := range m.GroupsSlice { + l = len(s) + n += 1 + l + sovGenerated(uint64(l)) + } + } + if m.Scopes != nil { + l = m.Scopes.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + return n +} + +func (m *NamedClusterRole) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Name) + n += 1 + l + sovGenerated(uint64(l)) + l = m.Role.Size() + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *NamedClusterRoleBinding) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Name) + n += 1 + l + sovGenerated(uint64(l)) + l = m.RoleBinding.Size() + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *NamedRole) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Name) + n += 1 + l + sovGenerated(uint64(l)) + l = m.Role.Size() + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *NamedRoleBinding) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Name) + n += 1 + l + sovGenerated(uint64(l)) + l = m.RoleBinding.Size() + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m OptionalNames) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m) > 0 { + for _, s := range m { + l = len(s) + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m OptionalScopes) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m) > 0 { + for _, s := range m { + l = len(s) + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m *PolicyRule) Size() (n int) { + if m == nil { + 
return 0 + } + var l int + _ = l + if len(m.Verbs) > 0 { + for _, s := range m.Verbs { + l = len(s) + n += 1 + l + sovGenerated(uint64(l)) + } + } + l = m.AttributeRestrictions.Size() + n += 1 + l + sovGenerated(uint64(l)) + if len(m.APIGroups) > 0 { + for _, s := range m.APIGroups { + l = len(s) + n += 1 + l + sovGenerated(uint64(l)) + } + } + if len(m.Resources) > 0 { + for _, s := range m.Resources { + l = len(s) + n += 1 + l + sovGenerated(uint64(l)) + } + } + if len(m.ResourceNames) > 0 { + for _, s := range m.ResourceNames { + l = len(s) + n += 1 + l + sovGenerated(uint64(l)) + } + } + if len(m.NonResourceURLsSlice) > 0 { + for _, s := range m.NonResourceURLsSlice { + l = len(s) + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m *ResourceAccessReview) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.Action.Size() + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *ResourceAccessReviewResponse) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Namespace) + n += 1 + l + sovGenerated(uint64(l)) + if len(m.UsersSlice) > 0 { + for _, s := range m.UsersSlice { + l = len(s) + n += 1 + l + sovGenerated(uint64(l)) + } + } + if len(m.GroupsSlice) > 0 { + for _, s := range m.GroupsSlice { + l = len(s) + n += 1 + l + sovGenerated(uint64(l)) + } + } + l = len(m.EvaluationError) + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *Role) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.ObjectMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + if len(m.Rules) > 0 { + for _, e := range m.Rules { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m *RoleBinding) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.ObjectMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + if m.UserNames != nil { + l = m.UserNames.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if m.GroupNames != nil { + l = m.GroupNames.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if len(m.Subjects) > 0 { + for _, e := range m.Subjects { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + l = m.RoleRef.Size() + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *RoleBindingList) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.ListMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + if len(m.Items) > 0 { + for _, e := range m.Items { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m *RoleBindingRestriction) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.ObjectMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = m.Spec.Size() + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *RoleBindingRestrictionList) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.ListMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + if len(m.Items) > 0 { + for _, e := range m.Items { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m *RoleBindingRestrictionSpec) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.UserRestriction != nil { + l = m.UserRestriction.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if m.GroupRestriction != nil { + l = m.GroupRestriction.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if m.ServiceAccountRestriction != nil { + l = m.ServiceAccountRestriction.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + return n 
+} + +func (m *RoleList) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.ListMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + if len(m.Items) > 0 { + for _, e := range m.Items { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m *SelfSubjectRulesReview) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.Spec.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = m.Status.Size() + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *SelfSubjectRulesReviewSpec) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Scopes != nil { + l = m.Scopes.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + return n +} + +func (m *ServiceAccountReference) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Name) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Namespace) + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *ServiceAccountRestriction) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.ServiceAccounts) > 0 { + for _, e := range m.ServiceAccounts { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + if len(m.Namespaces) > 0 { + for _, s := range m.Namespaces { + l = len(s) + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m *SubjectAccessReview) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.Action.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.User) + n += 1 + l + sovGenerated(uint64(l)) + if len(m.GroupsSlice) > 0 { + for _, s := range m.GroupsSlice { + l = len(s) + n += 1 + l + sovGenerated(uint64(l)) + } + } + if m.Scopes != nil { + l = m.Scopes.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + return n +} + +func (m *SubjectAccessReviewResponse) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Namespace) + n += 1 + l + sovGenerated(uint64(l)) + n += 2 + l = len(m.Reason) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.EvaluationError) + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *SubjectRulesReview) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.Spec.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = m.Status.Size() + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *SubjectRulesReviewSpec) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.User) + n += 1 + l + sovGenerated(uint64(l)) + if len(m.Groups) > 0 { + for _, s := range m.Groups { + l = len(s) + n += 1 + l + sovGenerated(uint64(l)) + } + } + if m.Scopes != nil { + l = m.Scopes.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + return n +} + +func (m *SubjectRulesReviewStatus) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.Rules) > 0 { + for _, e := range m.Rules { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + l = len(m.EvaluationError) + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *UserRestriction) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.Users) > 0 { + for _, s := range m.Users { + l = len(s) + n += 1 + l + sovGenerated(uint64(l)) + } + } + if len(m.Groups) > 0 { + for _, s := range m.Groups { + l = len(s) + n += 1 + l + sovGenerated(uint64(l)) + } + } + if len(m.Selectors) > 0 { + for _, e := range m.Selectors { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func sovGenerated(x uint64) (n int) { + return 
(math_bits.Len64(x|1) + 6) / 7 +} +func sozGenerated(x uint64) (n int) { + return sovGenerated(uint64((x << 1) ^ uint64((int64(x) >> 63)))) +} +func (this *Action) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&Action{`, + `Namespace:` + fmt.Sprintf("%v", this.Namespace) + `,`, + `Verb:` + fmt.Sprintf("%v", this.Verb) + `,`, + `Group:` + fmt.Sprintf("%v", this.Group) + `,`, + `Version:` + fmt.Sprintf("%v", this.Version) + `,`, + `Resource:` + fmt.Sprintf("%v", this.Resource) + `,`, + `ResourceName:` + fmt.Sprintf("%v", this.ResourceName) + `,`, + `Content:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Content), "RawExtension", "runtime.RawExtension", 1), `&`, ``, 1) + `,`, + `Path:` + fmt.Sprintf("%v", this.Path) + `,`, + `IsNonResourceURL:` + fmt.Sprintf("%v", this.IsNonResourceURL) + `,`, + `}`, + }, "") + return s +} +func (this *ClusterRole) String() string { + if this == nil { + return "nil" + } + repeatedStringForRules := "[]PolicyRule{" + for _, f := range this.Rules { + repeatedStringForRules += strings.Replace(strings.Replace(f.String(), "PolicyRule", "PolicyRule", 1), `&`, ``, 1) + "," + } + repeatedStringForRules += "}" + s := strings.Join([]string{`&ClusterRole{`, + `ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v1.ObjectMeta", 1), `&`, ``, 1) + `,`, + `Rules:` + repeatedStringForRules + `,`, + `AggregationRule:` + strings.Replace(fmt.Sprintf("%v", this.AggregationRule), "AggregationRule", "v11.AggregationRule", 1) + `,`, + `}`, + }, "") + return s +} +func (this *ClusterRoleBinding) String() string { + if this == nil { + return "nil" + } + repeatedStringForSubjects := "[]ObjectReference{" + for _, f := range this.Subjects { + repeatedStringForSubjects += fmt.Sprintf("%v", f) + "," + } + repeatedStringForSubjects += "}" + s := strings.Join([]string{`&ClusterRoleBinding{`, + `ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v1.ObjectMeta", 1), `&`, ``, 1) + `,`, + `UserNames:` + strings.Replace(fmt.Sprintf("%v", this.UserNames), "OptionalNames", "OptionalNames", 1) + `,`, + `GroupNames:` + strings.Replace(fmt.Sprintf("%v", this.GroupNames), "OptionalNames", "OptionalNames", 1) + `,`, + `Subjects:` + repeatedStringForSubjects + `,`, + `RoleRef:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.RoleRef), "ObjectReference", "v12.ObjectReference", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *ClusterRoleBindingList) String() string { + if this == nil { + return "nil" + } + repeatedStringForItems := "[]ClusterRoleBinding{" + for _, f := range this.Items { + repeatedStringForItems += strings.Replace(strings.Replace(f.String(), "ClusterRoleBinding", "ClusterRoleBinding", 1), `&`, ``, 1) + "," + } + repeatedStringForItems += "}" + s := strings.Join([]string{`&ClusterRoleBindingList{`, + `ListMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ListMeta), "ListMeta", "v1.ListMeta", 1), `&`, ``, 1) + `,`, + `Items:` + repeatedStringForItems + `,`, + `}`, + }, "") + return s +} +func (this *ClusterRoleList) String() string { + if this == nil { + return "nil" + } + repeatedStringForItems := "[]ClusterRole{" + for _, f := range this.Items { + repeatedStringForItems += strings.Replace(strings.Replace(f.String(), "ClusterRole", "ClusterRole", 1), `&`, ``, 1) + "," + } + repeatedStringForItems += "}" + s := strings.Join([]string{`&ClusterRoleList{`, + `ListMeta:` + 
strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ListMeta), "ListMeta", "v1.ListMeta", 1), `&`, ``, 1) + `,`, + `Items:` + repeatedStringForItems + `,`, + `}`, + }, "") + return s +} +func (this *GroupRestriction) String() string { + if this == nil { + return "nil" + } + repeatedStringForSelectors := "[]LabelSelector{" + for _, f := range this.Selectors { + repeatedStringForSelectors += fmt.Sprintf("%v", f) + "," + } + repeatedStringForSelectors += "}" + s := strings.Join([]string{`&GroupRestriction{`, + `Groups:` + fmt.Sprintf("%v", this.Groups) + `,`, + `Selectors:` + repeatedStringForSelectors + `,`, + `}`, + }, "") + return s +} +func (this *IsPersonalSubjectAccessReview) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&IsPersonalSubjectAccessReview{`, + `}`, + }, "") + return s +} +func (this *LocalResourceAccessReview) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&LocalResourceAccessReview{`, + `Action:` + strings.Replace(strings.Replace(this.Action.String(), "Action", "Action", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *LocalSubjectAccessReview) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&LocalSubjectAccessReview{`, + `Action:` + strings.Replace(strings.Replace(this.Action.String(), "Action", "Action", 1), `&`, ``, 1) + `,`, + `User:` + fmt.Sprintf("%v", this.User) + `,`, + `GroupsSlice:` + fmt.Sprintf("%v", this.GroupsSlice) + `,`, + `Scopes:` + strings.Replace(fmt.Sprintf("%v", this.Scopes), "OptionalScopes", "OptionalScopes", 1) + `,`, + `}`, + }, "") + return s +} +func (this *NamedClusterRole) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&NamedClusterRole{`, + `Name:` + fmt.Sprintf("%v", this.Name) + `,`, + `Role:` + strings.Replace(strings.Replace(this.Role.String(), "ClusterRole", "ClusterRole", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *NamedClusterRoleBinding) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&NamedClusterRoleBinding{`, + `Name:` + fmt.Sprintf("%v", this.Name) + `,`, + `RoleBinding:` + strings.Replace(strings.Replace(this.RoleBinding.String(), "ClusterRoleBinding", "ClusterRoleBinding", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *NamedRole) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&NamedRole{`, + `Name:` + fmt.Sprintf("%v", this.Name) + `,`, + `Role:` + strings.Replace(strings.Replace(this.Role.String(), "Role", "Role", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *NamedRoleBinding) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&NamedRoleBinding{`, + `Name:` + fmt.Sprintf("%v", this.Name) + `,`, + `RoleBinding:` + strings.Replace(strings.Replace(this.RoleBinding.String(), "RoleBinding", "RoleBinding", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *PolicyRule) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&PolicyRule{`, + `Verbs:` + fmt.Sprintf("%v", this.Verbs) + `,`, + `AttributeRestrictions:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.AttributeRestrictions), "RawExtension", "runtime.RawExtension", 1), `&`, ``, 1) + `,`, + `APIGroups:` + fmt.Sprintf("%v", this.APIGroups) + `,`, + `Resources:` + fmt.Sprintf("%v", this.Resources) + `,`, + `ResourceNames:` + fmt.Sprintf("%v", this.ResourceNames) + `,`, + 
`NonResourceURLsSlice:` + fmt.Sprintf("%v", this.NonResourceURLsSlice) + `,`, + `}`, + }, "") + return s +} +func (this *ResourceAccessReview) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&ResourceAccessReview{`, + `Action:` + strings.Replace(strings.Replace(this.Action.String(), "Action", "Action", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *ResourceAccessReviewResponse) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&ResourceAccessReviewResponse{`, + `Namespace:` + fmt.Sprintf("%v", this.Namespace) + `,`, + `UsersSlice:` + fmt.Sprintf("%v", this.UsersSlice) + `,`, + `GroupsSlice:` + fmt.Sprintf("%v", this.GroupsSlice) + `,`, + `EvaluationError:` + fmt.Sprintf("%v", this.EvaluationError) + `,`, + `}`, + }, "") + return s +} +func (this *Role) String() string { + if this == nil { + return "nil" + } + repeatedStringForRules := "[]PolicyRule{" + for _, f := range this.Rules { + repeatedStringForRules += strings.Replace(strings.Replace(f.String(), "PolicyRule", "PolicyRule", 1), `&`, ``, 1) + "," + } + repeatedStringForRules += "}" + s := strings.Join([]string{`&Role{`, + `ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v1.ObjectMeta", 1), `&`, ``, 1) + `,`, + `Rules:` + repeatedStringForRules + `,`, + `}`, + }, "") + return s +} +func (this *RoleBinding) String() string { + if this == nil { + return "nil" + } + repeatedStringForSubjects := "[]ObjectReference{" + for _, f := range this.Subjects { + repeatedStringForSubjects += fmt.Sprintf("%v", f) + "," + } + repeatedStringForSubjects += "}" + s := strings.Join([]string{`&RoleBinding{`, + `ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v1.ObjectMeta", 1), `&`, ``, 1) + `,`, + `UserNames:` + strings.Replace(fmt.Sprintf("%v", this.UserNames), "OptionalNames", "OptionalNames", 1) + `,`, + `GroupNames:` + strings.Replace(fmt.Sprintf("%v", this.GroupNames), "OptionalNames", "OptionalNames", 1) + `,`, + `Subjects:` + repeatedStringForSubjects + `,`, + `RoleRef:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.RoleRef), "ObjectReference", "v12.ObjectReference", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *RoleBindingList) String() string { + if this == nil { + return "nil" + } + repeatedStringForItems := "[]RoleBinding{" + for _, f := range this.Items { + repeatedStringForItems += strings.Replace(strings.Replace(f.String(), "RoleBinding", "RoleBinding", 1), `&`, ``, 1) + "," + } + repeatedStringForItems += "}" + s := strings.Join([]string{`&RoleBindingList{`, + `ListMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ListMeta), "ListMeta", "v1.ListMeta", 1), `&`, ``, 1) + `,`, + `Items:` + repeatedStringForItems + `,`, + `}`, + }, "") + return s +} +func (this *RoleBindingRestriction) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&RoleBindingRestriction{`, + `ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v1.ObjectMeta", 1), `&`, ``, 1) + `,`, + `Spec:` + strings.Replace(strings.Replace(this.Spec.String(), "RoleBindingRestrictionSpec", "RoleBindingRestrictionSpec", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *RoleBindingRestrictionList) String() string { + if this == nil { + return "nil" + } + repeatedStringForItems := "[]RoleBindingRestriction{" + for _, f := range this.Items { + 
repeatedStringForItems += strings.Replace(strings.Replace(f.String(), "RoleBindingRestriction", "RoleBindingRestriction", 1), `&`, ``, 1) + "," + } + repeatedStringForItems += "}" + s := strings.Join([]string{`&RoleBindingRestrictionList{`, + `ListMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ListMeta), "ListMeta", "v1.ListMeta", 1), `&`, ``, 1) + `,`, + `Items:` + repeatedStringForItems + `,`, + `}`, + }, "") + return s +} +func (this *RoleBindingRestrictionSpec) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&RoleBindingRestrictionSpec{`, + `UserRestriction:` + strings.Replace(this.UserRestriction.String(), "UserRestriction", "UserRestriction", 1) + `,`, + `GroupRestriction:` + strings.Replace(this.GroupRestriction.String(), "GroupRestriction", "GroupRestriction", 1) + `,`, + `ServiceAccountRestriction:` + strings.Replace(this.ServiceAccountRestriction.String(), "ServiceAccountRestriction", "ServiceAccountRestriction", 1) + `,`, + `}`, + }, "") + return s +} +func (this *RoleList) String() string { + if this == nil { + return "nil" + } + repeatedStringForItems := "[]Role{" + for _, f := range this.Items { + repeatedStringForItems += strings.Replace(strings.Replace(f.String(), "Role", "Role", 1), `&`, ``, 1) + "," + } + repeatedStringForItems += "}" + s := strings.Join([]string{`&RoleList{`, + `ListMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ListMeta), "ListMeta", "v1.ListMeta", 1), `&`, ``, 1) + `,`, + `Items:` + repeatedStringForItems + `,`, + `}`, + }, "") + return s +} +func (this *SelfSubjectRulesReview) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&SelfSubjectRulesReview{`, + `Spec:` + strings.Replace(strings.Replace(this.Spec.String(), "SelfSubjectRulesReviewSpec", "SelfSubjectRulesReviewSpec", 1), `&`, ``, 1) + `,`, + `Status:` + strings.Replace(strings.Replace(this.Status.String(), "SubjectRulesReviewStatus", "SubjectRulesReviewStatus", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *SelfSubjectRulesReviewSpec) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&SelfSubjectRulesReviewSpec{`, + `Scopes:` + strings.Replace(fmt.Sprintf("%v", this.Scopes), "OptionalScopes", "OptionalScopes", 1) + `,`, + `}`, + }, "") + return s +} +func (this *ServiceAccountReference) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&ServiceAccountReference{`, + `Name:` + fmt.Sprintf("%v", this.Name) + `,`, + `Namespace:` + fmt.Sprintf("%v", this.Namespace) + `,`, + `}`, + }, "") + return s +} +func (this *ServiceAccountRestriction) String() string { + if this == nil { + return "nil" + } + repeatedStringForServiceAccounts := "[]ServiceAccountReference{" + for _, f := range this.ServiceAccounts { + repeatedStringForServiceAccounts += strings.Replace(strings.Replace(f.String(), "ServiceAccountReference", "ServiceAccountReference", 1), `&`, ``, 1) + "," + } + repeatedStringForServiceAccounts += "}" + s := strings.Join([]string{`&ServiceAccountRestriction{`, + `ServiceAccounts:` + repeatedStringForServiceAccounts + `,`, + `Namespaces:` + fmt.Sprintf("%v", this.Namespaces) + `,`, + `}`, + }, "") + return s +} +func (this *SubjectAccessReview) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&SubjectAccessReview{`, + `Action:` + strings.Replace(strings.Replace(this.Action.String(), "Action", "Action", 1), `&`, ``, 1) + `,`, + `User:` + fmt.Sprintf("%v", this.User) + 
`,`, + `GroupsSlice:` + fmt.Sprintf("%v", this.GroupsSlice) + `,`, + `Scopes:` + strings.Replace(fmt.Sprintf("%v", this.Scopes), "OptionalScopes", "OptionalScopes", 1) + `,`, + `}`, + }, "") + return s +} +func (this *SubjectAccessReviewResponse) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&SubjectAccessReviewResponse{`, + `Namespace:` + fmt.Sprintf("%v", this.Namespace) + `,`, + `Allowed:` + fmt.Sprintf("%v", this.Allowed) + `,`, + `Reason:` + fmt.Sprintf("%v", this.Reason) + `,`, + `EvaluationError:` + fmt.Sprintf("%v", this.EvaluationError) + `,`, + `}`, + }, "") + return s +} +func (this *SubjectRulesReview) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&SubjectRulesReview{`, + `Spec:` + strings.Replace(strings.Replace(this.Spec.String(), "SubjectRulesReviewSpec", "SubjectRulesReviewSpec", 1), `&`, ``, 1) + `,`, + `Status:` + strings.Replace(strings.Replace(this.Status.String(), "SubjectRulesReviewStatus", "SubjectRulesReviewStatus", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *SubjectRulesReviewSpec) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&SubjectRulesReviewSpec{`, + `User:` + fmt.Sprintf("%v", this.User) + `,`, + `Groups:` + fmt.Sprintf("%v", this.Groups) + `,`, + `Scopes:` + strings.Replace(fmt.Sprintf("%v", this.Scopes), "OptionalScopes", "OptionalScopes", 1) + `,`, + `}`, + }, "") + return s +} +func (this *SubjectRulesReviewStatus) String() string { + if this == nil { + return "nil" + } + repeatedStringForRules := "[]PolicyRule{" + for _, f := range this.Rules { + repeatedStringForRules += strings.Replace(strings.Replace(f.String(), "PolicyRule", "PolicyRule", 1), `&`, ``, 1) + "," + } + repeatedStringForRules += "}" + s := strings.Join([]string{`&SubjectRulesReviewStatus{`, + `Rules:` + repeatedStringForRules + `,`, + `EvaluationError:` + fmt.Sprintf("%v", this.EvaluationError) + `,`, + `}`, + }, "") + return s +} +func (this *UserRestriction) String() string { + if this == nil { + return "nil" + } + repeatedStringForSelectors := "[]LabelSelector{" + for _, f := range this.Selectors { + repeatedStringForSelectors += fmt.Sprintf("%v", f) + "," + } + repeatedStringForSelectors += "}" + s := strings.Join([]string{`&UserRestriction{`, + `Users:` + fmt.Sprintf("%v", this.Users) + `,`, + `Groups:` + fmt.Sprintf("%v", this.Groups) + `,`, + `Selectors:` + repeatedStringForSelectors + `,`, + `}`, + }, "") + return s +} +func valueToStringGenerated(v interface{}) string { + rv := reflect.ValueOf(v) + if rv.IsNil() { + return "nil" + } + pv := reflect.Indirect(rv).Interface() + return fmt.Sprintf("*%v", pv) +} +func (m *Action) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Action: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Action: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Namespace", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if 
shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Namespace = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Verb", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Verb = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Group", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Group = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Version", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Version = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Resource", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Resource = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 6: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ResourceName", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return 
io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ResourceName = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 7: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Content", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Content.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 8: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Path", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Path = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 9: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field IsNonResourceURL", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.IsNonResourceURL = bool(v != 0) + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ClusterRole) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ClusterRole: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ClusterRole: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + 
b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ObjectMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Rules", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Rules = append(m.Rules, PolicyRule{}) + if err := m.Rules[len(m.Rules)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field AggregationRule", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.AggregationRule == nil { + m.AggregationRule = &v11.AggregationRule{} + } + if err := m.AggregationRule.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ClusterRoleBinding) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ClusterRoleBinding: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ClusterRoleBinding: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return 
io.ErrUnexpectedEOF + } + if err := m.ObjectMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field UserNames", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.UserNames == nil { + m.UserNames = OptionalNames{} + } + if err := m.UserNames.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field GroupNames", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.GroupNames == nil { + m.GroupNames = OptionalNames{} + } + if err := m.GroupNames.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Subjects", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Subjects = append(m.Subjects, v12.ObjectReference{}) + if err := m.Subjects[len(m.Subjects)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field RoleRef", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.RoleRef.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ClusterRoleBindingList) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l 
{ + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ClusterRoleBindingList: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ClusterRoleBindingList: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ListMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ListMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Items = append(m.Items, ClusterRoleBinding{}) + if err := m.Items[len(m.Items)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ClusterRoleList) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ClusterRoleList: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ClusterRoleList: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ListMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + 
if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ListMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Items = append(m.Items, ClusterRole{}) + if err := m.Items[len(m.Items)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *GroupRestriction) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: GroupRestriction: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: GroupRestriction: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Groups", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Groups = append(m.Groups, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Selectors", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Selectors = append(m.Selectors, v1.LabelSelector{}) + if err := m.Selectors[len(m.Selectors)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := 
skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *IsPersonalSubjectAccessReview) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: IsPersonalSubjectAccessReview: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: IsPersonalSubjectAccessReview: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *LocalResourceAccessReview) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: LocalResourceAccessReview: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: LocalResourceAccessReview: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Action", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Action.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *LocalSubjectAccessReview) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := 
dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: LocalSubjectAccessReview: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: LocalSubjectAccessReview: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Action", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Action.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field User", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.User = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field GroupsSlice", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.GroupsSlice = append(m.GroupsSlice, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Scopes", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Scopes == nil { + m.Scopes = OptionalScopes{} + } + if err := m.Scopes.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if 
iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *NamedClusterRole) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: NamedClusterRole: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: NamedClusterRole: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Name = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Role", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Role.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *NamedClusterRoleBinding) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: NamedClusterRoleBinding: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: NamedClusterRoleBinding: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << 
shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Name = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field RoleBinding", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.RoleBinding.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *NamedRole) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: NamedRole: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: NamedRole: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Name = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Role", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Role.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + 
} + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *NamedRoleBinding) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: NamedRoleBinding: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: NamedRoleBinding: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Name = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field RoleBinding", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.RoleBinding.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *OptionalNames) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: OptionalNames: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: OptionalNames: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType) + } + var stringLen uint64 + 
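+ // Decode the string length as a base-128 varint: each byte supplies
+ // seven low-order bits, and a set high bit means more bytes follow.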
for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + *m = append(*m, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *OptionalScopes) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: OptionalScopes: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: OptionalScopes: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + *m = append(*m, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *PolicyRule) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: PolicyRule: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: PolicyRule: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Verbs", wireType) + } + var stringLen uint64 + for shift := uint(0); ; 
shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Verbs = append(m.Verbs, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field AttributeRestrictions", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.AttributeRestrictions.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field APIGroups", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.APIGroups = append(m.APIGroups, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Resources", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Resources = append(m.Resources, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ResourceNames", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ResourceNames = append(m.ResourceNames, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + case 6: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field NonResourceURLsSlice", wireType) + } + var stringLen 
uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.NonResourceURLsSlice = append(m.NonResourceURLsSlice, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ResourceAccessReview) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ResourceAccessReview: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ResourceAccessReview: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Action", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Action.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ResourceAccessReviewResponse) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ResourceAccessReviewResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ResourceAccessReviewResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: 
wrong wireType = %d for field Namespace", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Namespace = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field UsersSlice", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.UsersSlice = append(m.UsersSlice, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field GroupsSlice", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.GroupsSlice = append(m.GroupsSlice, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field EvaluationError", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.EvaluationError = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *Role) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + 
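+ // The tag varint packs the field number in the upper bits and the
+ // three-bit wire type in the lower bits.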
fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Role: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Role: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ObjectMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Rules", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Rules = append(m.Rules, PolicyRule{}) + if err := m.Rules[len(m.Rules)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *RoleBinding) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: RoleBinding: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: RoleBinding: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ObjectMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field UserNames", wireType) 
+ } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.UserNames == nil { + m.UserNames = OptionalNames{} + } + if err := m.UserNames.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field GroupNames", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.GroupNames == nil { + m.GroupNames = OptionalNames{} + } + if err := m.GroupNames.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Subjects", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Subjects = append(m.Subjects, v12.ObjectReference{}) + if err := m.Subjects[len(m.Subjects)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field RoleRef", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.RoleRef.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *RoleBindingList) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + 
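+ // A byte with the continuation bit clear ends the varint.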
if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: RoleBindingList: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: RoleBindingList: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ListMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ListMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Items = append(m.Items, RoleBinding{}) + if err := m.Items[len(m.Items)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *RoleBindingRestriction) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: RoleBindingRestriction: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: RoleBindingRestriction: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ObjectMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 
2 { + return fmt.Errorf("proto: wrong wireType = %d for field Spec", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Spec.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *RoleBindingRestrictionList) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: RoleBindingRestrictionList: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: RoleBindingRestrictionList: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ListMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ListMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Items = append(m.Items, RoleBindingRestriction{}) + if err := m.Items[len(m.Items)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + 
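+ // Reaching this point means iNdEx == l: the message was consumed exactly.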
return nil +} +func (m *RoleBindingRestrictionSpec) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: RoleBindingRestrictionSpec: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: RoleBindingRestrictionSpec: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field UserRestriction", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.UserRestriction == nil { + m.UserRestriction = &UserRestriction{} + } + if err := m.UserRestriction.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field GroupRestriction", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.GroupRestriction == nil { + m.GroupRestriction = &GroupRestriction{} + } + if err := m.GroupRestriction.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ServiceAccountRestriction", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.ServiceAccountRestriction == nil { + m.ServiceAccountRestriction = &ServiceAccountRestriction{} + } + if err := m.ServiceAccountRestriction.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *RoleList) Unmarshal(dAtA []byte) error { + l := 
len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: RoleList: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: RoleList: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ListMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ListMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Items = append(m.Items, Role{}) + if err := m.Items[len(m.Items)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *SelfSubjectRulesReview) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: SelfSubjectRulesReview: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: SelfSubjectRulesReview: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Spec", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := 
iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Spec.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Status", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Status.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *SelfSubjectRulesReviewSpec) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: SelfSubjectRulesReviewSpec: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: SelfSubjectRulesReviewSpec: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Scopes", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Scopes == nil { + m.Scopes = OptionalScopes{} + } + if err := m.Scopes.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ServiceAccountReference) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := 
int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ServiceAccountReference: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ServiceAccountReference: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Name = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Namespace", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Namespace = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ServiceAccountRestriction) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ServiceAccountRestriction: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ServiceAccountRestriction: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ServiceAccounts", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ServiceAccounts = append(m.ServiceAccounts, ServiceAccountReference{}) + if err := m.ServiceAccounts[len(m.ServiceAccounts)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + 
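+ // Surface any error from decoding the nested ServiceAccountReference.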
return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Namespaces", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Namespaces = append(m.Namespaces, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *SubjectAccessReview) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: SubjectAccessReview: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: SubjectAccessReview: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Action", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Action.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field User", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.User = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field GroupsSlice", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << 
shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.GroupsSlice = append(m.GroupsSlice, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Scopes", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Scopes == nil { + m.Scopes = OptionalScopes{} + } + if err := m.Scopes.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *SubjectAccessReviewResponse) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: SubjectAccessReviewResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: SubjectAccessReviewResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Namespace", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Namespace = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Allowed", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.Allowed = bool(v != 0) + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Reason", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + 
return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Reason = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field EvaluationError", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.EvaluationError = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *SubjectRulesReview) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: SubjectRulesReview: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: SubjectRulesReview: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Spec", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Spec.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Status", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Status.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return 
err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *SubjectRulesReviewSpec) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: SubjectRulesReviewSpec: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: SubjectRulesReviewSpec: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field User", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.User = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Groups", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Groups = append(m.Groups, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Scopes", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Scopes == nil { + m.Scopes = OptionalScopes{} + } + if err := m.Scopes.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + 
iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *SubjectRulesReviewStatus) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: SubjectRulesReviewStatus: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: SubjectRulesReviewStatus: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Rules", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Rules = append(m.Rules, PolicyRule{}) + if err := m.Rules[len(m.Rules)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field EvaluationError", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.EvaluationError = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *UserRestriction) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: UserRestriction: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: UserRestriction: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Users", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } 
+ if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Users = append(m.Users, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Groups", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Groups = append(m.Groups, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Selectors", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Selectors = append(m.Selectors, v1.LabelSelector{}) + if err := m.Selectors[len(m.Selectors)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func skipGenerated(dAtA []byte) (n int, err error) { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowGenerated + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + wireType := int(wire & 0x7) + switch wireType { + case 0: + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowGenerated + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + iNdEx++ + if dAtA[iNdEx-1] < 0x80 { + break + } + } + return iNdEx, nil + case 1: + iNdEx += 8 + return iNdEx, nil + case 2: + var length int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowGenerated + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + length |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if length < 0 { + return 0, ErrInvalidLengthGenerated + } + iNdEx += length + if iNdEx < 0 { + return 0, ErrInvalidLengthGenerated + } + return iNdEx, nil + case 3: + for { + var innerWire uint64 + var start int = iNdEx + for shift := uint(0); ; 
shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowGenerated + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + innerWire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + innerWireType := int(innerWire & 0x7) + if innerWireType == 4 { + break + } + next, err := skipGenerated(dAtA[start:]) + if err != nil { + return 0, err + } + iNdEx = start + next + if iNdEx < 0 { + return 0, ErrInvalidLengthGenerated + } + } + return iNdEx, nil + case 4: + return iNdEx, nil + case 5: + iNdEx += 4 + return iNdEx, nil + default: + return 0, fmt.Errorf("proto: illegal wireType %d", wireType) + } + } + panic("unreachable") +} + +var ( + ErrInvalidLengthGenerated = fmt.Errorf("proto: negative length found during unmarshaling") + ErrIntOverflowGenerated = fmt.Errorf("proto: integer overflow") +) diff --git a/vendor/github.com/openshift/api/authorization/v1/generated.proto b/vendor/github.com/openshift/api/authorization/v1/generated.proto new file mode 100644 index 0000000000000000000000000000000000000000..49b26c1d72f8e5078726c8beb0806615b28ab5f8 --- /dev/null +++ b/vendor/github.com/openshift/api/authorization/v1/generated.proto @@ -0,0 +1,478 @@ + +// This file was autogenerated by go-to-protobuf. Do not edit it manually! + +syntax = 'proto2'; + +package github.com.openshift.api.authorization.v1; + +import "k8s.io/api/core/v1/generated.proto"; +import "k8s.io/api/rbac/v1/generated.proto"; +import "k8s.io/apimachinery/pkg/apis/meta/v1/generated.proto"; +import "k8s.io/apimachinery/pkg/runtime/generated.proto"; +import "k8s.io/apimachinery/pkg/runtime/schema/generated.proto"; + +// Package-wide variables from generator "generated". +option go_package = "v1"; + +// Action describes a request to the API server +message Action { + // Namespace is the namespace of the action being requested. Currently, there is no distinction between no namespace and all namespaces + optional string namespace = 1; + + // Verb is one of: get, list, watch, create, update, delete + optional string verb = 2; + + // Group is the API group of the resource + // Serialized as resourceAPIGroup to avoid confusion with the 'groups' field when inlined + optional string resourceAPIGroup = 3; + + // Version is the API version of the resource + // Serialized as resourceAPIVersion to avoid confusion with TypeMeta.apiVersion and ObjectMeta.resourceVersion when inlined + optional string resourceAPIVersion = 4; + + // Resource is one of the existing resource types + optional string resource = 5; + + // ResourceName is the name of the resource being requested for a "get" or deleted for a "delete" + optional string resourceName = 6; + + // Path is the path of a non resource URL + optional string path = 8; + + // IsNonResourceURL is true if this is a request for a non-resource URL (outside of the resource hierarchy) + optional bool isNonResourceURL = 9; + + // Content is the actual content of the request for create and update + // +kubebuilder:pruning:PreserveUnknownFields + optional k8s.io.apimachinery.pkg.runtime.RawExtension content = 7; +} + +// ClusterRole is a logical grouping of PolicyRules that can be referenced as a unit by ClusterRoleBindings. +message ClusterRole { + optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; + + // Rules holds all the PolicyRules for this ClusterRole + repeated PolicyRule rules = 2; + + // AggregationRule is an optional field that describes how to build the Rules for this ClusterRole. 
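Every generated Unmarshal method above repeats the same wire-format loop: read a base-128 varint key, split it into a field number (wire >> 3) and a wire type (wire & 0x7), then decode the field body. A minimal standalone sketch of that varint framing, written against the same conventions as the generated code but independent of it:

package main

import (
	"errors"
	"fmt"
	"io"
)

// decodeVarint mirrors the inner loop of the generated Unmarshal
// methods: each byte contributes 7 payload bits, and the high bit
// (0x80) marks continuation.
func decodeVarint(dAtA []byte) (uint64, int, error) {
	var v uint64
	for i, shift := 0, uint(0); ; i, shift = i+1, shift+7 {
		if shift >= 64 {
			return 0, 0, errors.New("proto: integer overflow")
		}
		if i >= len(dAtA) {
			return 0, 0, io.ErrUnexpectedEOF
		}
		b := dAtA[i]
		v |= uint64(b&0x7F) << shift
		if b < 0x80 {
			return v, i + 1, nil
		}
	}
}

func main() {
	// 0x96 0x01 is the classic protobuf example: it encodes 150.
	v, n, _ := decodeVarint([]byte{0x96, 0x01})
	fmt.Println(v, n) // 150 2

	// A key byte of 0x12 means field 2 with wire type 2
	// (length-delimited), exactly what the generated code
	// recovers as wire>>3 and wire&0x7.
	key, _, _ := decodeVarint([]byte{0x12})
	fmt.Println("field:", key>>3, "wireType:", key&0x7) // field: 2 wireType: 2
}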
+ // If AggregationRule is set, then the Rules are controller managed and direct changes to Rules will be + // stomped by the controller. + optional k8s.io.api.rbac.v1.AggregationRule aggregationRule = 3; +} + +// ClusterRoleBinding references a ClusterRole, but not contain it. It can reference any ClusterRole in the same namespace or in the global namespace. +// It adds who information via (Users and Groups) OR Subjects and namespace information by which namespace it exists in. +// ClusterRoleBindings in a given namespace only have effect in that namespace (excepting the master namespace which has power in all namespaces). +message ClusterRoleBinding { + optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; + + // UserNames holds all the usernames directly bound to the role. + // This field should only be specified when supporting legacy clients and servers. + // See Subjects for further details. + // +k8s:conversion-gen=false + // +optional + optional OptionalNames userNames = 2; + + // GroupNames holds all the groups directly bound to the role. + // This field should only be specified when supporting legacy clients and servers. + // See Subjects for further details. + // +k8s:conversion-gen=false + // +optional + optional OptionalNames groupNames = 3; + + // Subjects hold object references to authorize with this rule. + // This field is ignored if UserNames or GroupNames are specified to support legacy clients and servers. + // Thus newer clients that do not need to support backwards compatibility should send + // only fully qualified Subjects and should omit the UserNames and GroupNames fields. + // Clients that need to support backwards compatibility can use this field to build the UserNames and GroupNames. + repeated k8s.io.api.core.v1.ObjectReference subjects = 4; + + // RoleRef can only reference the current namespace and the global namespace. + // If the ClusterRoleRef cannot be resolved, the Authorizer must return an error. + // Since Policy is a singleton, this is sufficient knowledge to locate a role. + optional k8s.io.api.core.v1.ObjectReference roleRef = 5; +} + +// ClusterRoleBindingList is a collection of ClusterRoleBindings +message ClusterRoleBindingList { + optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; + + // Items is a list of ClusterRoleBindings + repeated ClusterRoleBinding items = 2; +} + +// ClusterRoleList is a collection of ClusterRoles +message ClusterRoleList { + optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; + + // Items is a list of ClusterRoles + repeated ClusterRole items = 2; +} + +// GroupRestriction matches a group either by a string match on the group name +// or a label selector applied to group labels. +message GroupRestriction { + // Groups is a list of groups used to match against an individual user's + // groups. If the user is a member of one of the whitelisted groups, the user + // is allowed to be bound to a role. + // +nullable + repeated string groups = 1; + + // Selectors specifies a list of label selectors over group labels. 
+ // +nullable + repeated k8s.io.apimachinery.pkg.apis.meta.v1.LabelSelector labels = 2; +} + +// IsPersonalSubjectAccessReview is a marker for PolicyRule.AttributeRestrictions that denotes that subjectaccessreviews on self should be allowed +message IsPersonalSubjectAccessReview { +} + +// LocalResourceAccessReview is a means to request a list of which users and groups are authorized to perform the action specified by spec in a particular namespace +message LocalResourceAccessReview { + // Action describes the action being tested. The Namespace element is FORCED to the current namespace. + optional Action Action = 1; +} + +// LocalSubjectAccessReview is an object for requesting information about whether a user or group can perform an action in a particular namespace +message LocalSubjectAccessReview { + // Action describes the action being tested. The Namespace element is FORCED to the current namespace. + optional Action Action = 1; + + // User is optional. If both User and Groups are empty, the current authenticated user is used. + optional string user = 2; + + // Groups is optional. Groups is the list of groups to which the User belongs. + // +k8s:conversion-gen=false + repeated string groups = 3; + + // Scopes to use for the evaluation. Empty means "use the unscoped (full) permissions of the user/groups". + // Nil for a self-SAR, means "use the scopes on this request". + // Nil for a regular SAR, means the same as empty. + // +k8s:conversion-gen=false + optional OptionalScopes scopes = 4; +} + +// NamedClusterRole relates a name with a cluster role +message NamedClusterRole { + // Name is the name of the cluster role + optional string name = 1; + + // Role is the cluster role being named + optional ClusterRole role = 2; +} + +// NamedClusterRoleBinding relates a name with a cluster role binding +message NamedClusterRoleBinding { + // Name is the name of the cluster role binding + optional string name = 1; + + // RoleBinding is the cluster role binding being named + optional ClusterRoleBinding roleBinding = 2; +} + +// NamedRole relates a Role with a name +message NamedRole { + // Name is the name of the role + optional string name = 1; + + // Role is the role being named + optional Role role = 2; +} + +// NamedRoleBinding relates a role binding with a name +message NamedRoleBinding { + // Name is the name of the role binding + optional string name = 1; + + // RoleBinding is the role binding being named + optional RoleBinding roleBinding = 2; +} + +// OptionalNames is an array that may also be left nil to distinguish between set and unset. +// +protobuf.nullable=true +// +protobuf.options.(gogoproto.goproto_stringer)=false +message OptionalNames { + // items, if empty, will result in an empty slice + + repeated string items = 1; +} + +// OptionalScopes is an array that may also be left nil to distinguish between set and unset. +// +protobuf.nullable=true +// +protobuf.options.(gogoproto.goproto_stringer)=false +message OptionalScopes { + // items, if empty, will result in an empty slice + + repeated string items = 1; +} + +// PolicyRule holds information that describes a policy rule, but does not contain information +// about who the rule applies to or which namespace the rule applies to. +message PolicyRule { + // Verbs is a list of Verbs that apply to ALL the ResourceKinds and AttributeRestrictions contained in this rule. VerbAll represents all kinds. 
+ repeated string verbs = 1; + + // AttributeRestrictions will vary depending on what the Authorizer/AuthorizationAttributeBuilder pair supports. + // If the Authorizer does not recognize how to handle the AttributeRestrictions, the Authorizer should report an error. + // +kubebuilder:pruning:PreserveUnknownFields + optional k8s.io.apimachinery.pkg.runtime.RawExtension attributeRestrictions = 2; + + // APIGroups is the name of the APIGroup that contains the resources. If this field is empty, then both kubernetes and origin API groups are assumed. + // That means that if an action is requested against one of the enumerated resources in either the kubernetes or the origin API group, the request + // will be allowed + repeated string apiGroups = 3; + + // Resources is a list of resources this rule applies to. ResourceAll represents all resources. + repeated string resources = 4; + + // ResourceNames is an optional white list of names that the rule applies to. An empty set means that everything is allowed. + repeated string resourceNames = 5; + + // NonResourceURLsSlice is a set of partial urls that a user should have access to. *s are allowed, but only as the full, final step in the path + // This name is intentionally different than the internal type so that the DefaultConvert works nicely and because the ordering may be different. + repeated string nonResourceURLs = 6; +} + +// ResourceAccessReview is a means to request a list of which users and groups are authorized to perform the +// action specified by spec +message ResourceAccessReview { + // Action describes the action being tested. + optional Action Action = 1; +} + +// ResourceAccessReviewResponse describes who can perform the action +message ResourceAccessReviewResponse { + // Namespace is the namespace used for the access review + optional string namespace = 1; + + // UsersSlice is the list of users who can perform the action + // +k8s:conversion-gen=false + repeated string users = 2; + + // GroupsSlice is the list of groups who can perform the action + // +k8s:conversion-gen=false + repeated string groups = 3; + + // EvaluationError is an indication that some error occurred during resolution, but partial results can still be returned. + // It is entirely possible to get an error and be able to continue determine authorization status in spite of it. This is + // most common when a bound role is missing, but enough roles are still present and bound to reason about the request. + optional string evalutionError = 4; +} + +// Role is a logical grouping of PolicyRules that can be referenced as a unit by RoleBindings. +message Role { + optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; + + // Rules holds all the PolicyRules for this Role + repeated PolicyRule rules = 2; +} + +// RoleBinding references a Role, but not contain it. It can reference any Role in the same namespace or in the global namespace. +// It adds who information via (Users and Groups) OR Subjects and namespace information by which namespace it exists in. +// RoleBindings in a given namespace only have effect in that namespace (excepting the master namespace which has power in all namespaces). +message RoleBinding { + optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; + + // UserNames holds all the usernames directly bound to the role. + // This field should only be specified when supporting legacy clients and servers. + // See Subjects for further details. 
+ // +k8s:conversion-gen=false + // +optional + optional OptionalNames userNames = 2; + + // GroupNames holds all the groups directly bound to the role. + // This field should only be specified when supporting legacy clients and servers. + // See Subjects for further details. + // +k8s:conversion-gen=false + // +optional + optional OptionalNames groupNames = 3; + + // Subjects hold object references to authorize with this rule. + // This field is ignored if UserNames or GroupNames are specified to support legacy clients and servers. + // Thus newer clients that do not need to support backwards compatibility should send + // only fully qualified Subjects and should omit the UserNames and GroupNames fields. + // Clients that need to support backwards compatibility can use this field to build the UserNames and GroupNames. + repeated k8s.io.api.core.v1.ObjectReference subjects = 4; + + // RoleRef can only reference the current namespace and the global namespace. + // If the RoleRef cannot be resolved, the Authorizer must return an error. + // Since Policy is a singleton, this is sufficient knowledge to locate a role. + optional k8s.io.api.core.v1.ObjectReference roleRef = 5; +} + +// RoleBindingList is a collection of RoleBindings +message RoleBindingList { + optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; + + // Items is a list of RoleBindings + repeated RoleBinding items = 2; +} + +// RoleBindingRestriction is an object that can be matched against a subject +// (user, group, or service account) to determine whether rolebindings on that +// subject are allowed in the namespace to which the RoleBindingRestriction +// belongs. If any one of those RoleBindingRestriction objects matches +// a subject, rolebindings on that subject in the namespace are allowed. +message RoleBindingRestriction { + optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; + + // Spec defines the matcher. + optional RoleBindingRestrictionSpec spec = 2; +} + +// RoleBindingRestrictionList is a collection of RoleBindingRestriction objects. +message RoleBindingRestrictionList { + optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; + + // Items is a list of RoleBindingRestriction objects. + repeated RoleBindingRestriction items = 2; +} + +// RoleBindingRestrictionSpec defines a rolebinding restriction. Exactly one +// field must be non-nil. +message RoleBindingRestrictionSpec { + // UserRestriction matches against user subjects. + // +nullable + optional UserRestriction userrestriction = 1; + + // GroupRestriction matches against group subjects. + // +nullable + optional GroupRestriction grouprestriction = 2; + + // ServiceAccountRestriction matches against service-account subjects. 
+ // +nullable + optional ServiceAccountRestriction serviceaccountrestriction = 3; +} + +// RoleList is a collection of Roles +message RoleList { + optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; + + // Items is a list of Roles + repeated Role items = 2; +} + +// SelfSubjectRulesReview is a resource you can create to determine which actions you can perform in a namespace +message SelfSubjectRulesReview { + // Spec adds information about how to conduct the check + optional SelfSubjectRulesReviewSpec spec = 1; + + // Status is completed by the server to tell which permissions you have + optional SubjectRulesReviewStatus status = 2; +} + +// SelfSubjectRulesReviewSpec adds information about how to conduct the check +message SelfSubjectRulesReviewSpec { + // Scopes to use for the evaluation. Empty means "use the unscoped (full) permissions of the user/groups". + // Nil means "use the scopes on this request". + // +k8s:conversion-gen=false + optional OptionalScopes scopes = 1; +} + +// ServiceAccountReference specifies a service account and namespace by their +// names. +message ServiceAccountReference { + // Name is the name of the service account. + optional string name = 1; + + // Namespace is the namespace of the service account. Service accounts from + // inside the whitelisted namespaces are allowed to be bound to roles. If + // Namespace is empty, then the namespace of the RoleBindingRestriction in + // which the ServiceAccountReference is embedded is used. + optional string namespace = 2; +} + +// ServiceAccountRestriction matches a service account by a string match on +// either the service-account name or the name of the service account's +// namespace. +message ServiceAccountRestriction { + // ServiceAccounts specifies a list of literal service-account names. + repeated ServiceAccountReference serviceaccounts = 1; + + // Namespaces specifies a list of literal namespace names. + repeated string namespaces = 2; +} + +// SubjectAccessReview is an object for requesting information about whether a user or group can perform an action +message SubjectAccessReview { + // Action describes the action being tested. + optional Action Action = 1; + + // User is optional. If both User and Groups are empty, the current authenticated user is used. + optional string user = 2; + + // GroupsSlice is optional. Groups is the list of groups to which the User belongs. + // +k8s:conversion-gen=false + repeated string groups = 3; + + // Scopes to use for the evaluation. Empty means "use the unscoped (full) permissions of the user/groups". + // Nil for a self-SAR, means "use the scopes on this request". + // Nil for a regular SAR, means the same as empty. + // +k8s:conversion-gen=false + optional OptionalScopes scopes = 4; +} + +// SubjectAccessReviewResponse describes whether or not a user or group can perform an action +message SubjectAccessReviewResponse { + // Namespace is the namespace used for the access review + optional string namespace = 1; + + // Allowed is required. True if the action would be allowed, false otherwise. + optional bool allowed = 2; + + // Reason is optional. It indicates why a request was allowed or denied. + optional string reason = 3; + + // EvaluationError is an indication that some error occurred during the authorization check. + // It is entirely possible to get an error and be able to continue determine authorization status in spite of it. 
This is + // most common when a bound role is missing, but enough roles are still present and bound to reason about the request. + optional string evaluationError = 4; +} + +// SubjectRulesReview is a resource you can create to determine which actions another user can perform in a namespace +message SubjectRulesReview { + // Spec adds information about how to conduct the check + optional SubjectRulesReviewSpec spec = 1; + + // Status is completed by the server to tell which permissions you have + optional SubjectRulesReviewStatus status = 2; +} + +// SubjectRulesReviewSpec adds information about how to conduct the check +message SubjectRulesReviewSpec { + // User is optional. At least one of User and Groups must be specified. + optional string user = 1; + + // Groups is optional. Groups is the list of groups to which the User belongs. At least one of User and Groups must be specified. + repeated string groups = 2; + + // Scopes to use for the evaluation. Empty means "use the unscoped (full) permissions of the user/groups". + optional OptionalScopes scopes = 3; +} + +// SubjectRulesReviewStatus is contains the result of a rules check +message SubjectRulesReviewStatus { + // Rules is the list of rules (no particular sort) that are allowed for the subject + repeated PolicyRule rules = 1; + + // EvaluationError can appear in combination with Rules. It means some error happened during evaluation + // that may have prevented additional rules from being populated. + optional string evaluationError = 2; +} + +// UserRestriction matches a user either by a string match on the user name, +// a string match on the name of a group to which the user belongs, or a label +// selector applied to the user labels. +message UserRestriction { + // Users specifies a list of literal user names. + repeated string users = 1; + + // Groups specifies a list of literal group names. + // +nullable + repeated string groups = 2; + + // Selectors specifies a list of label selectors over user labels. + // +nullable + repeated k8s.io.apimachinery.pkg.apis.meta.v1.LabelSelector labels = 3; +} + diff --git a/vendor/github.com/openshift/api/authorization/v1/legacy.go b/vendor/github.com/openshift/api/authorization/v1/legacy.go new file mode 100644 index 0000000000000000000000000000000000000000..f437a242ea8cb4c5d5a0c8b984dbff5f14d6d7d2 --- /dev/null +++ b/vendor/github.com/openshift/api/authorization/v1/legacy.go @@ -0,0 +1,43 @@ +package v1 + +import ( + corev1 "k8s.io/api/core/v1" + rbacv1 "k8s.io/api/rbac/v1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" +) + +var ( + legacyGroupVersion = schema.GroupVersion{Group: "", Version: "v1"} + legacySchemeBuilder = runtime.NewSchemeBuilder(addLegacyKnownTypes, corev1.AddToScheme, rbacv1.AddToScheme) + DeprecatedInstallWithoutGroup = legacySchemeBuilder.AddToScheme +) + +func addLegacyKnownTypes(scheme *runtime.Scheme) error { + types := []runtime.Object{ + &Role{}, + &RoleBinding{}, + &RoleBindingList{}, + &RoleList{}, + + &SelfSubjectRulesReview{}, + &SubjectRulesReview{}, + &ResourceAccessReview{}, + &SubjectAccessReview{}, + &LocalResourceAccessReview{}, + &LocalSubjectAccessReview{}, + &ResourceAccessReviewResponse{}, + &SubjectAccessReviewResponse{}, + &IsPersonalSubjectAccessReview{}, + + &ClusterRole{}, + &ClusterRoleBinding{}, + &ClusterRoleBindingList{}, + &ClusterRoleList{}, + + &RoleBindingRestriction{}, + &RoleBindingRestrictionList{}, + } + scheme.AddKnownTypes(legacyGroupVersion, types...) 
+ return nil +} diff --git a/vendor/github.com/openshift/api/authorization/v1/register.go b/vendor/github.com/openshift/api/authorization/v1/register.go new file mode 100644 index 0000000000000000000000000000000000000000..f1e12477b6516954539ccecfb99f13350394022e --- /dev/null +++ b/vendor/github.com/openshift/api/authorization/v1/register.go @@ -0,0 +1,60 @@ +package v1 + +import ( + corev1 "k8s.io/api/core/v1" + rbacv1 "k8s.io/api/rbac/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" +) + +var ( + GroupName = "authorization.openshift.io" + GroupVersion = schema.GroupVersion{Group: GroupName, Version: "v1"} + schemeBuilder = runtime.NewSchemeBuilder(addKnownTypes, corev1.AddToScheme, rbacv1.AddToScheme) + // Install is a function which adds this version to a scheme + Install = schemeBuilder.AddToScheme + + // SchemeGroupVersion generated code relies on this name + // Deprecated + SchemeGroupVersion = GroupVersion + // AddToScheme exists solely to keep the old generators creating valid code + // DEPRECATED + AddToScheme = schemeBuilder.AddToScheme +) + +// Resource generated code relies on this being here, but it logically belongs to the group +// DEPRECATED +func Resource(resource string) schema.GroupResource { + return schema.GroupResource{Group: GroupName, Resource: resource} +} + +// Adds the list of known types to api.Scheme. +func addKnownTypes(scheme *runtime.Scheme) error { + scheme.AddKnownTypes(GroupVersion, + &Role{}, + &RoleBinding{}, + &RoleBindingList{}, + &RoleList{}, + + &SelfSubjectRulesReview{}, + &SubjectRulesReview{}, + &ResourceAccessReview{}, + &SubjectAccessReview{}, + &LocalResourceAccessReview{}, + &LocalSubjectAccessReview{}, + &ResourceAccessReviewResponse{}, + &SubjectAccessReviewResponse{}, + &IsPersonalSubjectAccessReview{}, + + &ClusterRole{}, + &ClusterRoleBinding{}, + &ClusterRoleBindingList{}, + &ClusterRoleList{}, + + &RoleBindingRestriction{}, + &RoleBindingRestrictionList{}, + ) + metav1.AddToGroupVersion(scheme, GroupVersion) + return nil +} diff --git a/vendor/github.com/openshift/api/authorization/v1/types.go b/vendor/github.com/openshift/api/authorization/v1/types.go new file mode 100644 index 0000000000000000000000000000000000000000..00c42e8be00837520e67494ad915515fb5e0241a --- /dev/null +++ b/vendor/github.com/openshift/api/authorization/v1/types.go @@ -0,0 +1,543 @@ +package v1 + +import ( + "fmt" + + corev1 "k8s.io/api/core/v1" + rbacv1 "k8s.io/api/rbac/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + kruntime "k8s.io/apimachinery/pkg/runtime" +) + +// Authorization is calculated against +// 1. all deny RoleBinding PolicyRules in the master namespace - short circuit on match +// 2. all allow RoleBinding PolicyRules in the master namespace - short circuit on match +// 3. all deny RoleBinding PolicyRules in the namespace - short circuit on match +// 4. all allow RoleBinding PolicyRules in the namespace - short circuit on match +// 5. deny by default + +const ( + // GroupKind is string representation of kind used in role binding subjects that represents the "group". + GroupKind = "Group" + // UserKind is string representation of kind used in role binding subjects that represents the "user". + UserKind = "User" + + ScopesKey = "scopes.authorization.openshift.io" +) + +// PolicyRule holds information that describes a policy rule, but does not contain information +// about who the rule applies to or which namespace the rule applies to. 
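register.go above is the piece consumers are expected to call into: Install adds the authorization.openshift.io/v1 types (plus their core and rbac dependencies) to a runtime.Scheme, while SchemeGroupVersion and AddToScheme remain only for older generated code. A minimal sketch of wiring it up, assuming the package is imported from this vendored path:

package main

import (
	"fmt"

	authorizationv1 "github.com/openshift/api/authorization/v1"
	"k8s.io/apimachinery/pkg/runtime"
)

func main() {
	scheme := runtime.NewScheme()
	// Install registers every kind listed in addKnownTypes above,
	// so the scheme can encode, decode, and convert them.
	if err := authorizationv1.Install(scheme); err != nil {
		panic(err)
	}
	fmt.Println(scheme.Recognizes(authorizationv1.GroupVersion.WithKind("RoleBinding"))) // true
}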
+type PolicyRule struct { + // Verbs is a list of Verbs that apply to ALL the ResourceKinds and AttributeRestrictions contained in this rule. VerbAll represents all kinds. + Verbs []string `json:"verbs" protobuf:"bytes,1,rep,name=verbs"` + // AttributeRestrictions will vary depending on what the Authorizer/AuthorizationAttributeBuilder pair supports. + // If the Authorizer does not recognize how to handle the AttributeRestrictions, the Authorizer should report an error. + // +kubebuilder:pruning:PreserveUnknownFields + AttributeRestrictions kruntime.RawExtension `json:"attributeRestrictions,omitempty" protobuf:"bytes,2,opt,name=attributeRestrictions"` + // APIGroups is the name of the APIGroup that contains the resources. If this field is empty, then both kubernetes and origin API groups are assumed. + // That means that if an action is requested against one of the enumerated resources in either the kubernetes or the origin API group, the request + // will be allowed + APIGroups []string `json:"apiGroups" protobuf:"bytes,3,rep,name=apiGroups"` + // Resources is a list of resources this rule applies to. ResourceAll represents all resources. + Resources []string `json:"resources" protobuf:"bytes,4,rep,name=resources"` + // ResourceNames is an optional white list of names that the rule applies to. An empty set means that everything is allowed. + ResourceNames []string `json:"resourceNames,omitempty" protobuf:"bytes,5,rep,name=resourceNames"` + // NonResourceURLsSlice is a set of partial urls that a user should have access to. *s are allowed, but only as the full, final step in the path + // This name is intentionally different than the internal type so that the DefaultConvert works nicely and because the ordering may be different. + NonResourceURLsSlice []string `json:"nonResourceURLs,omitempty" protobuf:"bytes,6,rep,name=nonResourceURLs"` +} + +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// IsPersonalSubjectAccessReview is a marker for PolicyRule.AttributeRestrictions that denotes that subjectaccessreviews on self should be allowed +type IsPersonalSubjectAccessReview struct { + metav1.TypeMeta `json:",inline"` +} + +// +genclient +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// Role is a logical grouping of PolicyRules that can be referenced as a unit by RoleBindings. +type Role struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` + + // Rules holds all the PolicyRules for this Role + Rules []PolicyRule `json:"rules" protobuf:"bytes,2,rep,name=rules"` +} + +// OptionalNames is an array that may also be left nil to distinguish between set and unset. +// +protobuf.nullable=true +// +protobuf.options.(gogoproto.goproto_stringer)=false +type OptionalNames []string + +func (t OptionalNames) String() string { + return fmt.Sprintf("%v", []string(t)) +} + +// +genclient +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// RoleBinding references a Role, but not contain it. It can reference any Role in the same namespace or in the global namespace. +// It adds who information via (Users and Groups) OR Subjects and namespace information by which namespace it exists in. +// RoleBindings in a given namespace only have effect in that namespace (excepting the master namespace which has power in all namespaces). 
+type RoleBinding struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` + + // UserNames holds all the usernames directly bound to the role. + // This field should only be specified when supporting legacy clients and servers. + // See Subjects for further details. + // +k8s:conversion-gen=false + // +optional + UserNames OptionalNames `json:"userNames" protobuf:"bytes,2,rep,name=userNames"` + // GroupNames holds all the groups directly bound to the role. + // This field should only be specified when supporting legacy clients and servers. + // See Subjects for further details. + // +k8s:conversion-gen=false + // +optional + GroupNames OptionalNames `json:"groupNames" protobuf:"bytes,3,rep,name=groupNames"` + // Subjects hold object references to authorize with this rule. + // This field is ignored if UserNames or GroupNames are specified to support legacy clients and servers. + // Thus newer clients that do not need to support backwards compatibility should send + // only fully qualified Subjects and should omit the UserNames and GroupNames fields. + // Clients that need to support backwards compatibility can use this field to build the UserNames and GroupNames. + Subjects []corev1.ObjectReference `json:"subjects" protobuf:"bytes,4,rep,name=subjects"` + + // RoleRef can only reference the current namespace and the global namespace. + // If the RoleRef cannot be resolved, the Authorizer must return an error. + // Since Policy is a singleton, this is sufficient knowledge to locate a role. + RoleRef corev1.ObjectReference `json:"roleRef" protobuf:"bytes,5,opt,name=roleRef"` +} + +// NamedRole relates a Role with a name +type NamedRole struct { + // Name is the name of the role + Name string `json:"name" protobuf:"bytes,1,opt,name=name"` + // Role is the role being named + Role Role `json:"role" protobuf:"bytes,2,opt,name=role"` +} + +// NamedRoleBinding relates a role binding with a name +type NamedRoleBinding struct { + // Name is the name of the role binding + Name string `json:"name" protobuf:"bytes,1,opt,name=name"` + // RoleBinding is the role binding being named + RoleBinding RoleBinding `json:"roleBinding" protobuf:"bytes,2,opt,name=roleBinding"` +} + +// +genclient +// +genclient:onlyVerbs=create +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// SelfSubjectRulesReview is a resource you can create to determine which actions you can perform in a namespace +type SelfSubjectRulesReview struct { + metav1.TypeMeta `json:",inline"` + + // Spec adds information about how to conduct the check + Spec SelfSubjectRulesReviewSpec `json:"spec" protobuf:"bytes,1,opt,name=spec"` + + // Status is completed by the server to tell which permissions you have + Status SubjectRulesReviewStatus `json:"status,omitempty" protobuf:"bytes,2,opt,name=status"` +} + +// SelfSubjectRulesReviewSpec adds information about how to conduct the check +type SelfSubjectRulesReviewSpec struct { + // Scopes to use for the evaluation. Empty means "use the unscoped (full) permissions of the user/groups". + // Nil means "use the scopes on this request". 
+ // +k8s:conversion-gen=false + Scopes OptionalScopes `json:"scopes" protobuf:"bytes,1,rep,name=scopes"` +} + +// +genclient +// +genclient:onlyVerbs=create +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// SubjectRulesReview is a resource you can create to determine which actions another user can perform in a namespace +type SubjectRulesReview struct { + metav1.TypeMeta `json:",inline"` + + // Spec adds information about how to conduct the check + Spec SubjectRulesReviewSpec `json:"spec" protobuf:"bytes,1,opt,name=spec"` + + // Status is completed by the server to tell which permissions you have + Status SubjectRulesReviewStatus `json:"status,omitempty" protobuf:"bytes,2,opt,name=status"` +} + +// SubjectRulesReviewSpec adds information about how to conduct the check +type SubjectRulesReviewSpec struct { + // User is optional. At least one of User and Groups must be specified. + User string `json:"user" protobuf:"bytes,1,opt,name=user"` + // Groups is optional. Groups is the list of groups to which the User belongs. At least one of User and Groups must be specified. + Groups []string `json:"groups" protobuf:"bytes,2,rep,name=groups"` + // Scopes to use for the evaluation. Empty means "use the unscoped (full) permissions of the user/groups". + Scopes OptionalScopes `json:"scopes" protobuf:"bytes,3,opt,name=scopes"` +} + +// SubjectRulesReviewStatus is contains the result of a rules check +type SubjectRulesReviewStatus struct { + // Rules is the list of rules (no particular sort) that are allowed for the subject + Rules []PolicyRule `json:"rules" protobuf:"bytes,1,rep,name=rules"` + // EvaluationError can appear in combination with Rules. It means some error happened during evaluation + // that may have prevented additional rules from being populated. + EvaluationError string `json:"evaluationError,omitempty" protobuf:"bytes,2,opt,name=evaluationError"` +} + +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// ResourceAccessReviewResponse describes who can perform the action +type ResourceAccessReviewResponse struct { + metav1.TypeMeta `json:",inline"` + + // Namespace is the namespace used for the access review + Namespace string `json:"namespace,omitempty" protobuf:"bytes,1,opt,name=namespace"` + // UsersSlice is the list of users who can perform the action + // +k8s:conversion-gen=false + UsersSlice []string `json:"users" protobuf:"bytes,2,rep,name=users"` + // GroupsSlice is the list of groups who can perform the action + // +k8s:conversion-gen=false + GroupsSlice []string `json:"groups" protobuf:"bytes,3,rep,name=groups"` + + // EvaluationError is an indication that some error occurred during resolution, but partial results can still be returned. + // It is entirely possible to get an error and be able to continue determine authorization status in spite of it. This is + // most common when a bound role is missing, but enough roles are still present and bound to reason about the request. 
+ // Note: the serialized name "evalutionError" is misspelled, but it is kept as-is for compatibility.
+ EvaluationError string `json:"evalutionError" protobuf:"bytes,4,opt,name=evalutionError"`
+}
+
+// +genclient
+// +genclient:nonNamespaced
+// +genclient:skipVerbs=get,list,create,update,patch,delete,deleteCollection,watch
+// +genclient:method=Create,verb=create,result=ResourceAccessReviewResponse
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+
+// ResourceAccessReview is a means to request a list of which users and groups are authorized to perform the
+// action specified by spec
+type ResourceAccessReview struct {
+ metav1.TypeMeta `json:",inline"`
+
+ // Action describes the action being tested.
+ Action `json:",inline" protobuf:"bytes,1,opt,name=Action"`
+}
+
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+
+// SubjectAccessReviewResponse describes whether or not a user or group can perform an action
+type SubjectAccessReviewResponse struct {
+ metav1.TypeMeta `json:",inline"`
+
+ // Namespace is the namespace used for the access review
+ Namespace string `json:"namespace,omitempty" protobuf:"bytes,1,opt,name=namespace"`
+ // Allowed is required. True if the action would be allowed, false otherwise.
+ Allowed bool `json:"allowed" protobuf:"varint,2,opt,name=allowed"`
+ // Reason is optional. It indicates why a request was allowed or denied.
+ Reason string `json:"reason,omitempty" protobuf:"bytes,3,opt,name=reason"`
+ // EvaluationError is an indication that some error occurred during the authorization check.
+ // It is entirely possible to get an error and be able to continue determining authorization status in spite of it. This is
+ // most common when a bound role is missing, but enough roles are still present and bound to reason about the request.
+ EvaluationError string `json:"evaluationError,omitempty" protobuf:"bytes,4,opt,name=evaluationError"`
+}
+
+// OptionalScopes is an array that may also be left nil to distinguish between set and unset.
+// +protobuf.nullable=true
+// +protobuf.options.(gogoproto.goproto_stringer)=false
+type OptionalScopes []string
+
+func (t OptionalScopes) String() string {
+ return fmt.Sprintf("%v", []string(t))
+}
+
+// +genclient
+// +genclient:nonNamespaced
+// +genclient:skipVerbs=get,list,create,update,patch,delete,deleteCollection,watch
+// +genclient:method=Create,verb=create,result=SubjectAccessReviewResponse
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+
+// SubjectAccessReview is an object for requesting information about whether a user or group can perform an action
+type SubjectAccessReview struct {
+ metav1.TypeMeta `json:",inline"`
+
+ // Action describes the action being tested.
+ Action `json:",inline" protobuf:"bytes,1,opt,name=Action"`
+ // User is optional. If both User and Groups are empty, the current authenticated user is used.
+ User string `json:"user" protobuf:"bytes,2,opt,name=user"`
+ // GroupsSlice is optional. Groups is the list of groups to which the User belongs.
+ // +k8s:conversion-gen=false
+ GroupsSlice []string `json:"groups" protobuf:"bytes,3,rep,name=groups"`
+ // Scopes to use for the evaluation. Empty means "use the unscoped (full) permissions of the user/groups".
+ // Nil for a self-SAR, means "use the scopes on this request".
+ // Nil for a regular SAR, means the same as empty.
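And one sketch for the access-review pair (verb, resource, and user are illustrative; the create verb comes from the +genclient markers above).

sar := authorizationv1.SubjectAccessReview{
	Action: authorizationv1.Action{
		Namespace: "demo",
		Verb:      "get",
		Resource:  "pods",
	},
	User: "alice",
	// Scopes left nil: for a regular SAR this means the same as empty.
}
// The server replies with a SubjectAccessReviewResponse; the verdict is
// response.Allowed and response.Reason explains the decision.
_ = sar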
+ // +k8s:conversion-gen=false + Scopes OptionalScopes `json:"scopes" protobuf:"bytes,4,rep,name=scopes"` +} + +// +genclient +// +genclient:skipVerbs=get,list,create,update,patch,delete,deleteCollection,watch +// +genclient:method=Create,verb=create,result=ResourceAccessReviewResponse +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// LocalResourceAccessReview is a means to request a list of which users and groups are authorized to perform the action specified by spec in a particular namespace +type LocalResourceAccessReview struct { + metav1.TypeMeta `json:",inline"` + + // Action describes the action being tested. The Namespace element is FORCED to the current namespace. + Action `json:",inline" protobuf:"bytes,1,opt,name=Action"` +} + +// +genclient +// +genclient:skipVerbs=get,list,create,update,patch,delete,deleteCollection,watch +// +genclient:method=Create,verb=create,result=SubjectAccessReviewResponse +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// LocalSubjectAccessReview is an object for requesting information about whether a user or group can perform an action in a particular namespace +type LocalSubjectAccessReview struct { + metav1.TypeMeta `json:",inline"` + + // Action describes the action being tested. The Namespace element is FORCED to the current namespace. + Action `json:",inline" protobuf:"bytes,1,opt,name=Action"` + // User is optional. If both User and Groups are empty, the current authenticated user is used. + User string `json:"user" protobuf:"bytes,2,opt,name=user"` + // Groups is optional. Groups is the list of groups to which the User belongs. + // +k8s:conversion-gen=false + GroupsSlice []string `json:"groups" protobuf:"bytes,3,rep,name=groups"` + // Scopes to use for the evaluation. Empty means "use the unscoped (full) permissions of the user/groups". + // Nil for a self-SAR, means "use the scopes on this request". + // Nil for a regular SAR, means the same as empty. + // +k8s:conversion-gen=false + Scopes OptionalScopes `json:"scopes" protobuf:"bytes,4,rep,name=scopes"` +} + +// Action describes a request to the API server +type Action struct { + // Namespace is the namespace of the action being requested. 
Currently, there is no distinction between no namespace and all namespaces
+ Namespace string `json:"namespace" protobuf:"bytes,1,opt,name=namespace"`
+ // Verb is one of: get, list, watch, create, update, delete
+ Verb string `json:"verb" protobuf:"bytes,2,opt,name=verb"`
+ // Group is the API group of the resource
+ // Serialized as resourceAPIGroup to avoid confusion with the 'groups' field when inlined
+ Group string `json:"resourceAPIGroup" protobuf:"bytes,3,opt,name=resourceAPIGroup"`
+ // Version is the API version of the resource
+ // Serialized as resourceAPIVersion to avoid confusion with TypeMeta.apiVersion and ObjectMeta.resourceVersion when inlined
+ Version string `json:"resourceAPIVersion" protobuf:"bytes,4,opt,name=resourceAPIVersion"`
+ // Resource is one of the existing resource types
+ Resource string `json:"resource" protobuf:"bytes,5,opt,name=resource"`
+ // ResourceName is the name of the resource being requested for a "get" or deleted for a "delete"
+ ResourceName string `json:"resourceName" protobuf:"bytes,6,opt,name=resourceName"`
+ // Path is the path of a non-resource URL
+ Path string `json:"path" protobuf:"bytes,8,opt,name=path"`
+ // IsNonResourceURL is true if this is a request for a non-resource URL (outside of the resource hierarchy)
+ IsNonResourceURL bool `json:"isNonResourceURL" protobuf:"varint,9,opt,name=isNonResourceURL"`
+ // Content is the actual content of the request for create and update
+ // +kubebuilder:pruning:PreserveUnknownFields
+ Content kruntime.RawExtension `json:"content,omitempty" protobuf:"bytes,7,opt,name=content"`
+}
+
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+
+// RoleBindingList is a collection of RoleBindings
+type RoleBindingList struct {
+ metav1.TypeMeta `json:",inline"`
+ metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"`
+
+ // Items is a list of RoleBindings
+ Items []RoleBinding `json:"items" protobuf:"bytes,2,rep,name=items"`
+}
+
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+
+// RoleList is a collection of Roles
+type RoleList struct {
+ metav1.TypeMeta `json:",inline"`
+ metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"`
+
+ // Items is a list of Roles
+ Items []Role `json:"items" protobuf:"bytes,2,rep,name=items"`
+}
+
+// +genclient
+// +genclient:nonNamespaced
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+
+// ClusterRole is a logical grouping of PolicyRules that can be referenced as a unit by ClusterRoleBindings.
+type ClusterRole struct {
+ metav1.TypeMeta `json:",inline"`
+ metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"`
+
+ // Rules holds all the PolicyRules for this ClusterRole
+ Rules []PolicyRule `json:"rules" protobuf:"bytes,2,rep,name=rules"`
+
+ // AggregationRule is an optional field that describes how to build the Rules for this ClusterRole.
+ // If AggregationRule is set, then the Rules are controller managed and direct changes to Rules will be
+ // stomped by the controller.
+ AggregationRule *rbacv1.AggregationRule `json:"aggregationRule,omitempty" protobuf:"bytes,3,opt,name=aggregationRule"`
+}
+
+// +genclient
+// +genclient:nonNamespaced
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+
+// ClusterRoleBinding references a ClusterRole, but does not contain it. It can reference any ClusterRole in the same namespace or in the global namespace.
+// It adds who information via (Users and Groups) OR Subjects and namespace information by which namespace it exists in. +// ClusterRoleBindings in a given namespace only have effect in that namespace (excepting the master namespace which has power in all namespaces). +type ClusterRoleBinding struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` + + // UserNames holds all the usernames directly bound to the role. + // This field should only be specified when supporting legacy clients and servers. + // See Subjects for further details. + // +k8s:conversion-gen=false + // +optional + UserNames OptionalNames `json:"userNames" protobuf:"bytes,2,rep,name=userNames"` + // GroupNames holds all the groups directly bound to the role. + // This field should only be specified when supporting legacy clients and servers. + // See Subjects for further details. + // +k8s:conversion-gen=false + // +optional + GroupNames OptionalNames `json:"groupNames" protobuf:"bytes,3,rep,name=groupNames"` + // Subjects hold object references to authorize with this rule. + // This field is ignored if UserNames or GroupNames are specified to support legacy clients and servers. + // Thus newer clients that do not need to support backwards compatibility should send + // only fully qualified Subjects and should omit the UserNames and GroupNames fields. + // Clients that need to support backwards compatibility can use this field to build the UserNames and GroupNames. + Subjects []corev1.ObjectReference `json:"subjects" protobuf:"bytes,4,rep,name=subjects"` + + // RoleRef can only reference the current namespace and the global namespace. + // If the ClusterRoleRef cannot be resolved, the Authorizer must return an error. + // Since Policy is a singleton, this is sufficient knowledge to locate a role. 
+ RoleRef corev1.ObjectReference `json:"roleRef" protobuf:"bytes,5,opt,name=roleRef"` +} + +// NamedClusterRole relates a name with a cluster role +type NamedClusterRole struct { + // Name is the name of the cluster role + Name string `json:"name" protobuf:"bytes,1,opt,name=name"` + // Role is the cluster role being named + Role ClusterRole `json:"role" protobuf:"bytes,2,opt,name=role"` +} + +// NamedClusterRoleBinding relates a name with a cluster role binding +type NamedClusterRoleBinding struct { + // Name is the name of the cluster role binding + Name string `json:"name" protobuf:"bytes,1,opt,name=name"` + // RoleBinding is the cluster role binding being named + RoleBinding ClusterRoleBinding `json:"roleBinding" protobuf:"bytes,2,opt,name=roleBinding"` +} + +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// ClusterRoleBindingList is a collection of ClusterRoleBindings +type ClusterRoleBindingList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` + + // Items is a list of ClusterRoleBindings + Items []ClusterRoleBinding `json:"items" protobuf:"bytes,2,rep,name=items"` +} + +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// ClusterRoleList is a collection of ClusterRoles +type ClusterRoleList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` + + // Items is a list of ClusterRoles + Items []ClusterRole `json:"items" protobuf:"bytes,2,rep,name=items"` +} + +// +genclient +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// RoleBindingRestriction is an object that can be matched against a subject +// (user, group, or service account) to determine whether rolebindings on that +// subject are allowed in the namespace to which the RoleBindingRestriction +// belongs. If any one of those RoleBindingRestriction objects matches +// a subject, rolebindings on that subject in the namespace are allowed. +type RoleBindingRestriction struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata" protobuf:"bytes,1,opt,name=metadata"` + + // Spec defines the matcher. + Spec RoleBindingRestrictionSpec `json:"spec" protobuf:"bytes,2,opt,name=spec"` +} + +// RoleBindingRestrictionSpec defines a rolebinding restriction. Exactly one +// field must be non-nil. +type RoleBindingRestrictionSpec struct { + // UserRestriction matches against user subjects. + // +nullable + UserRestriction *UserRestriction `json:"userrestriction" protobuf:"bytes,1,opt,name=userrestriction"` + + // GroupRestriction matches against group subjects. + // +nullable + GroupRestriction *GroupRestriction `json:"grouprestriction" protobuf:"bytes,2,opt,name=grouprestriction"` + + // ServiceAccountRestriction matches against service-account subjects. + // +nullable + ServiceAccountRestriction *ServiceAccountRestriction `json:"serviceaccountrestriction" protobuf:"bytes,3,opt,name=serviceaccountrestriction"` +} + +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// RoleBindingRestrictionList is a collection of RoleBindingRestriction objects. +type RoleBindingRestrictionList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` + + // Items is a list of RoleBindingRestriction objects. 
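A sketch of the "exactly one matcher" contract stated above for RoleBindingRestrictionSpec (names are illustrative; imports as in the first sketch).

rbr := authorizationv1.RoleBindingRestriction{
	ObjectMeta: metav1.ObjectMeta{Name: "only-team-a", Namespace: "demo"},
	Spec: authorizationv1.RoleBindingRestrictionSpec{
		// Exactly one of the three matchers may be non-nil; the other
		// two (UserRestriction, ServiceAccountRestriction) stay nil.
		GroupRestriction: &authorizationv1.GroupRestriction{
			Groups: []string{"team-a"},
		},
	},
}
_ = rbr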
+ Items []RoleBindingRestriction `json:"items" protobuf:"bytes,2,rep,name=items"` +} + +// UserRestriction matches a user either by a string match on the user name, +// a string match on the name of a group to which the user belongs, or a label +// selector applied to the user labels. +type UserRestriction struct { + // Users specifies a list of literal user names. + Users []string `json:"users" protobuf:"bytes,1,rep,name=users"` + + // Groups specifies a list of literal group names. + // +nullable + Groups []string `json:"groups" protobuf:"bytes,2,rep,name=groups"` + + // Selectors specifies a list of label selectors over user labels. + // +nullable + Selectors []metav1.LabelSelector `json:"labels" protobuf:"bytes,3,rep,name=labels"` +} + +// GroupRestriction matches a group either by a string match on the group name +// or a label selector applied to group labels. +type GroupRestriction struct { + // Groups is a list of groups used to match against an individual user's + // groups. If the user is a member of one of the whitelisted groups, the user + // is allowed to be bound to a role. + // +nullable + Groups []string `json:"groups" protobuf:"bytes,1,rep,name=groups"` + + // Selectors specifies a list of label selectors over group labels. + // +nullable + Selectors []metav1.LabelSelector `json:"labels" protobuf:"bytes,2,rep,name=labels"` +} + +// ServiceAccountRestriction matches a service account by a string match on +// either the service-account name or the name of the service account's +// namespace. +type ServiceAccountRestriction struct { + // ServiceAccounts specifies a list of literal service-account names. + ServiceAccounts []ServiceAccountReference `json:"serviceaccounts" protobuf:"bytes,1,rep,name=serviceaccounts"` + + // Namespaces specifies a list of literal namespace names. + Namespaces []string `json:"namespaces" protobuf:"bytes,2,rep,name=namespaces"` +} + +// ServiceAccountReference specifies a service account and namespace by their +// names. +type ServiceAccountReference struct { + // Name is the name of the service account. + Name string `json:"name" protobuf:"bytes,1,opt,name=name"` + + // Namespace is the namespace of the service account. Service accounts from + // inside the whitelisted namespaces are allowed to be bound to roles. If + // Namespace is empty, then the namespace of the RoleBindingRestriction in + // which the ServiceAccountReference is embedded is used. + Namespace string `json:"namespace" protobuf:"bytes,2,opt,name=namespace"` +} diff --git a/vendor/github.com/openshift/api/authorization/v1/zz_generated.deepcopy.go b/vendor/github.com/openshift/api/authorization/v1/zz_generated.deepcopy.go new file mode 100644 index 0000000000000000000000000000000000000000..16cbd2e4fb45c3024cd3d982c992b530911818ad --- /dev/null +++ b/vendor/github.com/openshift/api/authorization/v1/zz_generated.deepcopy.go @@ -0,0 +1,993 @@ +// +build !ignore_autogenerated + +// Code generated by deepcopy-gen. DO NOT EDIT. + +package v1 + +import ( + corev1 "k8s.io/api/core/v1" + rbacv1 "k8s.io/api/rbac/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + runtime "k8s.io/apimachinery/pkg/runtime" +) + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *Action) DeepCopyInto(out *Action) { + *out = *in + in.Content.DeepCopyInto(&out.Content) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Action. 
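As the generated deepcopy functions begin here, a short sketch of what they buy you: copies own their backing arrays, so mutating a copy cannot corrupt a shared or cached original.

orig := &authorizationv1.ClusterRole{
	Rules: []authorizationv1.PolicyRule{{Verbs: []string{"get"}}},
}
dup := orig.DeepCopy()
dup.Rules[0].Verbs[0] = "watch"
// orig.Rules[0].Verbs[0] is still "get": DeepCopyInto allocated a fresh
// slice rather than sharing the original's storage.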
+func (in *Action) DeepCopy() *Action { + if in == nil { + return nil + } + out := new(Action) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ClusterRole) DeepCopyInto(out *ClusterRole) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + if in.Rules != nil { + in, out := &in.Rules, &out.Rules + *out = make([]PolicyRule, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.AggregationRule != nil { + in, out := &in.AggregationRule, &out.AggregationRule + *out = new(rbacv1.AggregationRule) + (*in).DeepCopyInto(*out) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClusterRole. +func (in *ClusterRole) DeepCopy() *ClusterRole { + if in == nil { + return nil + } + out := new(ClusterRole) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *ClusterRole) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ClusterRoleBinding) DeepCopyInto(out *ClusterRoleBinding) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + if in.UserNames != nil { + in, out := &in.UserNames, &out.UserNames + *out = make(OptionalNames, len(*in)) + copy(*out, *in) + } + if in.GroupNames != nil { + in, out := &in.GroupNames, &out.GroupNames + *out = make(OptionalNames, len(*in)) + copy(*out, *in) + } + if in.Subjects != nil { + in, out := &in.Subjects, &out.Subjects + *out = make([]corev1.ObjectReference, len(*in)) + copy(*out, *in) + } + out.RoleRef = in.RoleRef + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClusterRoleBinding. +func (in *ClusterRoleBinding) DeepCopy() *ClusterRoleBinding { + if in == nil { + return nil + } + out := new(ClusterRoleBinding) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *ClusterRoleBinding) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ClusterRoleBindingList) DeepCopyInto(out *ClusterRoleBindingList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]ClusterRoleBinding, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClusterRoleBindingList. +func (in *ClusterRoleBindingList) DeepCopy() *ClusterRoleBindingList { + if in == nil { + return nil + } + out := new(ClusterRoleBindingList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. 
+func (in *ClusterRoleBindingList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ClusterRoleList) DeepCopyInto(out *ClusterRoleList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]ClusterRole, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClusterRoleList. +func (in *ClusterRoleList) DeepCopy() *ClusterRoleList { + if in == nil { + return nil + } + out := new(ClusterRoleList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *ClusterRoleList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *GroupRestriction) DeepCopyInto(out *GroupRestriction) { + *out = *in + if in.Groups != nil { + in, out := &in.Groups, &out.Groups + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.Selectors != nil { + in, out := &in.Selectors, &out.Selectors + *out = make([]metav1.LabelSelector, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new GroupRestriction. +func (in *GroupRestriction) DeepCopy() *GroupRestriction { + if in == nil { + return nil + } + out := new(GroupRestriction) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *IsPersonalSubjectAccessReview) DeepCopyInto(out *IsPersonalSubjectAccessReview) { + *out = *in + out.TypeMeta = in.TypeMeta + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IsPersonalSubjectAccessReview. +func (in *IsPersonalSubjectAccessReview) DeepCopy() *IsPersonalSubjectAccessReview { + if in == nil { + return nil + } + out := new(IsPersonalSubjectAccessReview) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *IsPersonalSubjectAccessReview) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *LocalResourceAccessReview) DeepCopyInto(out *LocalResourceAccessReview) { + *out = *in + out.TypeMeta = in.TypeMeta + in.Action.DeepCopyInto(&out.Action) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LocalResourceAccessReview. +func (in *LocalResourceAccessReview) DeepCopy() *LocalResourceAccessReview { + if in == nil { + return nil + } + out := new(LocalResourceAccessReview) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. 
+func (in *LocalResourceAccessReview) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *LocalSubjectAccessReview) DeepCopyInto(out *LocalSubjectAccessReview) { + *out = *in + out.TypeMeta = in.TypeMeta + in.Action.DeepCopyInto(&out.Action) + if in.GroupsSlice != nil { + in, out := &in.GroupsSlice, &out.GroupsSlice + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.Scopes != nil { + in, out := &in.Scopes, &out.Scopes + *out = make(OptionalScopes, len(*in)) + copy(*out, *in) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LocalSubjectAccessReview. +func (in *LocalSubjectAccessReview) DeepCopy() *LocalSubjectAccessReview { + if in == nil { + return nil + } + out := new(LocalSubjectAccessReview) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *LocalSubjectAccessReview) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *NamedClusterRole) DeepCopyInto(out *NamedClusterRole) { + *out = *in + in.Role.DeepCopyInto(&out.Role) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NamedClusterRole. +func (in *NamedClusterRole) DeepCopy() *NamedClusterRole { + if in == nil { + return nil + } + out := new(NamedClusterRole) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *NamedClusterRoleBinding) DeepCopyInto(out *NamedClusterRoleBinding) { + *out = *in + in.RoleBinding.DeepCopyInto(&out.RoleBinding) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NamedClusterRoleBinding. +func (in *NamedClusterRoleBinding) DeepCopy() *NamedClusterRoleBinding { + if in == nil { + return nil + } + out := new(NamedClusterRoleBinding) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *NamedRole) DeepCopyInto(out *NamedRole) { + *out = *in + in.Role.DeepCopyInto(&out.Role) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NamedRole. +func (in *NamedRole) DeepCopy() *NamedRole { + if in == nil { + return nil + } + out := new(NamedRole) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *NamedRoleBinding) DeepCopyInto(out *NamedRoleBinding) { + *out = *in + in.RoleBinding.DeepCopyInto(&out.RoleBinding) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NamedRoleBinding. +func (in *NamedRoleBinding) DeepCopy() *NamedRoleBinding { + if in == nil { + return nil + } + out := new(NamedRoleBinding) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in OptionalNames) DeepCopyInto(out *OptionalNames) { + { + in := &in + *out = make(OptionalNames, len(*in)) + copy(*out, *in) + return + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OptionalNames. +func (in OptionalNames) DeepCopy() OptionalNames { + if in == nil { + return nil + } + out := new(OptionalNames) + in.DeepCopyInto(out) + return *out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in OptionalScopes) DeepCopyInto(out *OptionalScopes) { + { + in := &in + *out = make(OptionalScopes, len(*in)) + copy(*out, *in) + return + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OptionalScopes. +func (in OptionalScopes) DeepCopy() OptionalScopes { + if in == nil { + return nil + } + out := new(OptionalScopes) + in.DeepCopyInto(out) + return *out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PolicyRule) DeepCopyInto(out *PolicyRule) { + *out = *in + if in.Verbs != nil { + in, out := &in.Verbs, &out.Verbs + *out = make([]string, len(*in)) + copy(*out, *in) + } + in.AttributeRestrictions.DeepCopyInto(&out.AttributeRestrictions) + if in.APIGroups != nil { + in, out := &in.APIGroups, &out.APIGroups + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.Resources != nil { + in, out := &in.Resources, &out.Resources + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.ResourceNames != nil { + in, out := &in.ResourceNames, &out.ResourceNames + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.NonResourceURLsSlice != nil { + in, out := &in.NonResourceURLsSlice, &out.NonResourceURLsSlice + *out = make([]string, len(*in)) + copy(*out, *in) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PolicyRule. +func (in *PolicyRule) DeepCopy() *PolicyRule { + if in == nil { + return nil + } + out := new(PolicyRule) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ResourceAccessReview) DeepCopyInto(out *ResourceAccessReview) { + *out = *in + out.TypeMeta = in.TypeMeta + in.Action.DeepCopyInto(&out.Action) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ResourceAccessReview. +func (in *ResourceAccessReview) DeepCopy() *ResourceAccessReview { + if in == nil { + return nil + } + out := new(ResourceAccessReview) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *ResourceAccessReview) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
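Note the value receivers on OptionalNames and OptionalScopes above: as named slice types they copy cheaply, and a nil receiver round-trips to nil, preserving the set-versus-unset distinction these types exist for. A tiny sketch:

scopes := authorizationv1.OptionalScopes{"user:info"}
dup := scopes.DeepCopy() // returns OptionalScopes by value, not a pointer
dup[0] = "user:check-access"
// scopes[0] is unchanged, and authorizationv1.OptionalScopes(nil).DeepCopy()
// returns nil rather than an empty slice.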
+func (in *ResourceAccessReviewResponse) DeepCopyInto(out *ResourceAccessReviewResponse) { + *out = *in + out.TypeMeta = in.TypeMeta + if in.UsersSlice != nil { + in, out := &in.UsersSlice, &out.UsersSlice + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.GroupsSlice != nil { + in, out := &in.GroupsSlice, &out.GroupsSlice + *out = make([]string, len(*in)) + copy(*out, *in) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ResourceAccessReviewResponse. +func (in *ResourceAccessReviewResponse) DeepCopy() *ResourceAccessReviewResponse { + if in == nil { + return nil + } + out := new(ResourceAccessReviewResponse) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *ResourceAccessReviewResponse) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *Role) DeepCopyInto(out *Role) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + if in.Rules != nil { + in, out := &in.Rules, &out.Rules + *out = make([]PolicyRule, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Role. +func (in *Role) DeepCopy() *Role { + if in == nil { + return nil + } + out := new(Role) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *Role) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *RoleBinding) DeepCopyInto(out *RoleBinding) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + if in.UserNames != nil { + in, out := &in.UserNames, &out.UserNames + *out = make(OptionalNames, len(*in)) + copy(*out, *in) + } + if in.GroupNames != nil { + in, out := &in.GroupNames, &out.GroupNames + *out = make(OptionalNames, len(*in)) + copy(*out, *in) + } + if in.Subjects != nil { + in, out := &in.Subjects, &out.Subjects + *out = make([]corev1.ObjectReference, len(*in)) + copy(*out, *in) + } + out.RoleRef = in.RoleRef + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RoleBinding. +func (in *RoleBinding) DeepCopy() *RoleBinding { + if in == nil { + return nil + } + out := new(RoleBinding) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *RoleBinding) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *RoleBindingList) DeepCopyInto(out *RoleBindingList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]RoleBinding, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RoleBindingList. +func (in *RoleBindingList) DeepCopy() *RoleBindingList { + if in == nil { + return nil + } + out := new(RoleBindingList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *RoleBindingList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *RoleBindingRestriction) DeepCopyInto(out *RoleBindingRestriction) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RoleBindingRestriction. +func (in *RoleBindingRestriction) DeepCopy() *RoleBindingRestriction { + if in == nil { + return nil + } + out := new(RoleBindingRestriction) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *RoleBindingRestriction) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *RoleBindingRestrictionList) DeepCopyInto(out *RoleBindingRestrictionList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]RoleBindingRestriction, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RoleBindingRestrictionList. +func (in *RoleBindingRestrictionList) DeepCopy() *RoleBindingRestrictionList { + if in == nil { + return nil + } + out := new(RoleBindingRestrictionList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *RoleBindingRestrictionList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *RoleBindingRestrictionSpec) DeepCopyInto(out *RoleBindingRestrictionSpec) { + *out = *in + if in.UserRestriction != nil { + in, out := &in.UserRestriction, &out.UserRestriction + *out = new(UserRestriction) + (*in).DeepCopyInto(*out) + } + if in.GroupRestriction != nil { + in, out := &in.GroupRestriction, &out.GroupRestriction + *out = new(GroupRestriction) + (*in).DeepCopyInto(*out) + } + if in.ServiceAccountRestriction != nil { + in, out := &in.ServiceAccountRestriction, &out.ServiceAccountRestriction + *out = new(ServiceAccountRestriction) + (*in).DeepCopyInto(*out) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RoleBindingRestrictionSpec. +func (in *RoleBindingRestrictionSpec) DeepCopy() *RoleBindingRestrictionSpec { + if in == nil { + return nil + } + out := new(RoleBindingRestrictionSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *RoleList) DeepCopyInto(out *RoleList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]Role, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RoleList. +func (in *RoleList) DeepCopy() *RoleList { + if in == nil { + return nil + } + out := new(RoleList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *RoleList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SelfSubjectRulesReview) DeepCopyInto(out *SelfSubjectRulesReview) { + *out = *in + out.TypeMeta = in.TypeMeta + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SelfSubjectRulesReview. +func (in *SelfSubjectRulesReview) DeepCopy() *SelfSubjectRulesReview { + if in == nil { + return nil + } + out := new(SelfSubjectRulesReview) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *SelfSubjectRulesReview) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SelfSubjectRulesReviewSpec) DeepCopyInto(out *SelfSubjectRulesReviewSpec) { + *out = *in + if in.Scopes != nil { + in, out := &in.Scopes, &out.Scopes + *out = make(OptionalScopes, len(*in)) + copy(*out, *in) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SelfSubjectRulesReviewSpec. +func (in *SelfSubjectRulesReviewSpec) DeepCopy() *SelfSubjectRulesReviewSpec { + if in == nil { + return nil + } + out := new(SelfSubjectRulesReviewSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ServiceAccountReference) DeepCopyInto(out *ServiceAccountReference) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ServiceAccountReference. +func (in *ServiceAccountReference) DeepCopy() *ServiceAccountReference { + if in == nil { + return nil + } + out := new(ServiceAccountReference) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ServiceAccountRestriction) DeepCopyInto(out *ServiceAccountRestriction) { + *out = *in + if in.ServiceAccounts != nil { + in, out := &in.ServiceAccounts, &out.ServiceAccounts + *out = make([]ServiceAccountReference, len(*in)) + copy(*out, *in) + } + if in.Namespaces != nil { + in, out := &in.Namespaces, &out.Namespaces + *out = make([]string, len(*in)) + copy(*out, *in) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ServiceAccountRestriction. +func (in *ServiceAccountRestriction) DeepCopy() *ServiceAccountRestriction { + if in == nil { + return nil + } + out := new(ServiceAccountRestriction) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SubjectAccessReview) DeepCopyInto(out *SubjectAccessReview) { + *out = *in + out.TypeMeta = in.TypeMeta + in.Action.DeepCopyInto(&out.Action) + if in.GroupsSlice != nil { + in, out := &in.GroupsSlice, &out.GroupsSlice + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.Scopes != nil { + in, out := &in.Scopes, &out.Scopes + *out = make(OptionalScopes, len(*in)) + copy(*out, *in) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SubjectAccessReview. +func (in *SubjectAccessReview) DeepCopy() *SubjectAccessReview { + if in == nil { + return nil + } + out := new(SubjectAccessReview) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *SubjectAccessReview) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SubjectAccessReviewResponse) DeepCopyInto(out *SubjectAccessReviewResponse) { + *out = *in + out.TypeMeta = in.TypeMeta + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SubjectAccessReviewResponse. +func (in *SubjectAccessReviewResponse) DeepCopy() *SubjectAccessReviewResponse { + if in == nil { + return nil + } + out := new(SubjectAccessReviewResponse) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *SubjectAccessReviewResponse) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *SubjectRulesReview) DeepCopyInto(out *SubjectRulesReview) { + *out = *in + out.TypeMeta = in.TypeMeta + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SubjectRulesReview. +func (in *SubjectRulesReview) DeepCopy() *SubjectRulesReview { + if in == nil { + return nil + } + out := new(SubjectRulesReview) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *SubjectRulesReview) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SubjectRulesReviewSpec) DeepCopyInto(out *SubjectRulesReviewSpec) { + *out = *in + if in.Groups != nil { + in, out := &in.Groups, &out.Groups + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.Scopes != nil { + in, out := &in.Scopes, &out.Scopes + *out = make(OptionalScopes, len(*in)) + copy(*out, *in) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SubjectRulesReviewSpec. +func (in *SubjectRulesReviewSpec) DeepCopy() *SubjectRulesReviewSpec { + if in == nil { + return nil + } + out := new(SubjectRulesReviewSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SubjectRulesReviewStatus) DeepCopyInto(out *SubjectRulesReviewStatus) { + *out = *in + if in.Rules != nil { + in, out := &in.Rules, &out.Rules + *out = make([]PolicyRule, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SubjectRulesReviewStatus. +func (in *SubjectRulesReviewStatus) DeepCopy() *SubjectRulesReviewStatus { + if in == nil { + return nil + } + out := new(SubjectRulesReviewStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *UserRestriction) DeepCopyInto(out *UserRestriction) { + *out = *in + if in.Users != nil { + in, out := &in.Users, &out.Users + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.Groups != nil { + in, out := &in.Groups, &out.Groups + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.Selectors != nil { + in, out := &in.Selectors, &out.Selectors + *out = make([]metav1.LabelSelector, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new UserRestriction. 
+func (in *UserRestriction) DeepCopy() *UserRestriction {
+ if in == nil {
+ return nil
+ }
+ out := new(UserRestriction)
+ in.DeepCopyInto(out)
+ return out
+}
diff --git a/vendor/github.com/openshift/api/authorization/v1/zz_generated.swagger_doc_generated.go b/vendor/github.com/openshift/api/authorization/v1/zz_generated.swagger_doc_generated.go
new file mode 100644
index 0000000000000000000000000000000000000000..8bce982f19966fb2cdc1bc6a90b660d642de2bf6
--- /dev/null
+++ b/vendor/github.com/openshift/api/authorization/v1/zz_generated.swagger_doc_generated.go
@@ -0,0 +1,354 @@
+package v1
+
+// This file contains a collection of methods that can be used from go-restful to
+// generate Swagger API documentation for its models. Please read this PR for more
+// information on the implementation: https://github.com/emicklei/go-restful/pull/215
+//
+// TODOs are ignored from the parser (e.g. TODO(andronat):... || TODO:...) if and only if
+// they are on one line! For multiple line or blocks that you want to ignore use ---.
+// Any context after a --- is ignored.
+//
+// Those methods can be generated by using hack/update-swagger-docs.sh
+
+// AUTO-GENERATED FUNCTIONS START HERE
+var map_Action = map[string]string{
+ "": "Action describes a request to the API server",
+ "namespace": "Namespace is the namespace of the action being requested. Currently, there is no distinction between no namespace and all namespaces",
+ "verb": "Verb is one of: get, list, watch, create, update, delete",
+ "resourceAPIGroup": "Group is the API group of the resource Serialized as resourceAPIGroup to avoid confusion with the 'groups' field when inlined",
+ "resourceAPIVersion": "Version is the API version of the resource Serialized as resourceAPIVersion to avoid confusion with TypeMeta.apiVersion and ObjectMeta.resourceVersion when inlined",
+ "resource": "Resource is one of the existing resource types",
+ "resourceName": "ResourceName is the name of the resource being requested for a \"get\" or deleted for a \"delete\"",
+ "path": "Path is the path of a non-resource URL",
+ "isNonResourceURL": "IsNonResourceURL is true if this is a request for a non-resource URL (outside of the resource hierarchy)",
+ "content": "Content is the actual content of the request for create and update",
+}
+
+func (Action) SwaggerDoc() map[string]string {
+ return map_Action
+}
+
+var map_ClusterRole = map[string]string{
+ "": "ClusterRole is a logical grouping of PolicyRules that can be referenced as a unit by ClusterRoleBindings.",
+ "rules": "Rules holds all the PolicyRules for this ClusterRole",
+ "aggregationRule": "AggregationRule is an optional field that describes how to build the Rules for this ClusterRole. If AggregationRule is set, then the Rules are controller managed and direct changes to Rules will be stomped by the controller.",
+}
+
+func (ClusterRole) SwaggerDoc() map[string]string {
+ return map_ClusterRole
+}
+
+var map_ClusterRoleBinding = map[string]string{
+ "": "ClusterRoleBinding references a ClusterRole, but does not contain it. It can reference any ClusterRole in the same namespace or in the global namespace. It adds who information via (Users and Groups) OR Subjects and namespace information by which namespace it exists in. ClusterRoleBindings in a given namespace only have effect in that namespace (excepting the master namespace which has power in all namespaces).",
+ "userNames": "UserNames holds all the usernames directly bound to the role.
This field should only be specified when supporting legacy clients and servers. See Subjects for further details.", + "groupNames": "GroupNames holds all the groups directly bound to the role. This field should only be specified when supporting legacy clients and servers. See Subjects for further details.", + "subjects": "Subjects hold object references to authorize with this rule. This field is ignored if UserNames or GroupNames are specified to support legacy clients and servers. Thus newer clients that do not need to support backwards compatibility should send only fully qualified Subjects and should omit the UserNames and GroupNames fields. Clients that need to support backwards compatibility can use this field to build the UserNames and GroupNames.", + "roleRef": "RoleRef can only reference the current namespace and the global namespace. If the ClusterRoleRef cannot be resolved, the Authorizer must return an error. Since Policy is a singleton, this is sufficient knowledge to locate a role.", +} + +func (ClusterRoleBinding) SwaggerDoc() map[string]string { + return map_ClusterRoleBinding +} + +var map_ClusterRoleBindingList = map[string]string{ + "": "ClusterRoleBindingList is a collection of ClusterRoleBindings", + "items": "Items is a list of ClusterRoleBindings", +} + +func (ClusterRoleBindingList) SwaggerDoc() map[string]string { + return map_ClusterRoleBindingList +} + +var map_ClusterRoleList = map[string]string{ + "": "ClusterRoleList is a collection of ClusterRoles", + "items": "Items is a list of ClusterRoles", +} + +func (ClusterRoleList) SwaggerDoc() map[string]string { + return map_ClusterRoleList +} + +var map_GroupRestriction = map[string]string{ + "": "GroupRestriction matches a group either by a string match on the group name or a label selector applied to group labels.", + "groups": "Groups is a list of groups used to match against an individual user's groups. If the user is a member of one of the whitelisted groups, the user is allowed to be bound to a role.", + "labels": "Selectors specifies a list of label selectors over group labels.", +} + +func (GroupRestriction) SwaggerDoc() map[string]string { + return map_GroupRestriction +} + +var map_IsPersonalSubjectAccessReview = map[string]string{ + "": "IsPersonalSubjectAccessReview is a marker for PolicyRule.AttributeRestrictions that denotes that subjectaccessreviews on self should be allowed", +} + +func (IsPersonalSubjectAccessReview) SwaggerDoc() map[string]string { + return map_IsPersonalSubjectAccessReview +} + +var map_LocalResourceAccessReview = map[string]string{ + "": "LocalResourceAccessReview is a means to request a list of which users and groups are authorized to perform the action specified by spec in a particular namespace", +} + +func (LocalResourceAccessReview) SwaggerDoc() map[string]string { + return map_LocalResourceAccessReview +} + +var map_LocalSubjectAccessReview = map[string]string{ + "": "LocalSubjectAccessReview is an object for requesting information about whether a user or group can perform an action in a particular namespace", + "user": "User is optional. If both User and Groups are empty, the current authenticated user is used.", + "groups": "Groups is optional. Groups is the list of groups to which the User belongs.", + "scopes": "Scopes to use for the evaluation. Empty means \"use the unscoped (full) permissions of the user/groups\". Nil for a self-SAR, means \"use the scopes on this request\". 
Nil for a regular SAR, means the same as empty.", +} + +func (LocalSubjectAccessReview) SwaggerDoc() map[string]string { + return map_LocalSubjectAccessReview +} + +var map_NamedClusterRole = map[string]string{ + "": "NamedClusterRole relates a name with a cluster role", + "name": "Name is the name of the cluster role", + "role": "Role is the cluster role being named", +} + +func (NamedClusterRole) SwaggerDoc() map[string]string { + return map_NamedClusterRole +} + +var map_NamedClusterRoleBinding = map[string]string{ + "": "NamedClusterRoleBinding relates a name with a cluster role binding", + "name": "Name is the name of the cluster role binding", + "roleBinding": "RoleBinding is the cluster role binding being named", +} + +func (NamedClusterRoleBinding) SwaggerDoc() map[string]string { + return map_NamedClusterRoleBinding +} + +var map_NamedRole = map[string]string{ + "": "NamedRole relates a Role with a name", + "name": "Name is the name of the role", + "role": "Role is the role being named", +} + +func (NamedRole) SwaggerDoc() map[string]string { + return map_NamedRole +} + +var map_NamedRoleBinding = map[string]string{ + "": "NamedRoleBinding relates a role binding with a name", + "name": "Name is the name of the role binding", + "roleBinding": "RoleBinding is the role binding being named", +} + +func (NamedRoleBinding) SwaggerDoc() map[string]string { + return map_NamedRoleBinding +} + +var map_PolicyRule = map[string]string{ + "": "PolicyRule holds information that describes a policy rule, but does not contain information about who the rule applies to or which namespace the rule applies to.", + "verbs": "Verbs is a list of Verbs that apply to ALL the ResourceKinds and AttributeRestrictions contained in this rule. VerbAll represents all kinds.", + "attributeRestrictions": "AttributeRestrictions will vary depending on what the Authorizer/AuthorizationAttributeBuilder pair supports. If the Authorizer does not recognize how to handle the AttributeRestrictions, the Authorizer should report an error.", + "apiGroups": "APIGroups is the name of the APIGroup that contains the resources. If this field is empty, then both kubernetes and origin API groups are assumed. That means that if an action is requested against one of the enumerated resources in either the kubernetes or the origin API group, the request will be allowed", + "resources": "Resources is a list of resources this rule applies to. ResourceAll represents all resources.", + "resourceNames": "ResourceNames is an optional white list of names that the rule applies to. An empty set means that everything is allowed.", + "nonResourceURLs": "NonResourceURLsSlice is a set of partial urls that a user should have access to. 
*s are allowed, but only as the full, final step in the path. This name is intentionally different than the internal type so that the DefaultConvert works nicely and because the ordering may be different.",
+}
+
+func (PolicyRule) SwaggerDoc() map[string]string {
+ return map_PolicyRule
+}
+
+var map_ResourceAccessReview = map[string]string{
+ "": "ResourceAccessReview is a means to request a list of which users and groups are authorized to perform the action specified by spec",
+}
+
+func (ResourceAccessReview) SwaggerDoc() map[string]string {
+ return map_ResourceAccessReview
+}
+
+var map_ResourceAccessReviewResponse = map[string]string{
+ "": "ResourceAccessReviewResponse describes who can perform the action",
+ "namespace": "Namespace is the namespace used for the access review",
+ "users": "UsersSlice is the list of users who can perform the action",
+ "groups": "GroupsSlice is the list of groups who can perform the action",
+ "evalutionError": "EvaluationError is an indication that some error occurred during resolution, but partial results can still be returned. It is entirely possible to get an error and be able to continue determining authorization status in spite of it. This is most common when a bound role is missing, but enough roles are still present and bound to reason about the request.",
+}
+
+func (ResourceAccessReviewResponse) SwaggerDoc() map[string]string {
+ return map_ResourceAccessReviewResponse
+}
+
+var map_Role = map[string]string{
+ "": "Role is a logical grouping of PolicyRules that can be referenced as a unit by RoleBindings.",
+ "rules": "Rules holds all the PolicyRules for this Role",
+}
+
+func (Role) SwaggerDoc() map[string]string {
+ return map_Role
+}
+
+var map_RoleBinding = map[string]string{
+ "": "RoleBinding references a Role, but does not contain it. It can reference any Role in the same namespace or in the global namespace. It adds who information via (Users and Groups) OR Subjects and namespace information by which namespace it exists in. RoleBindings in a given namespace only have effect in that namespace (excepting the master namespace which has power in all namespaces).",
+ "userNames": "UserNames holds all the usernames directly bound to the role. This field should only be specified when supporting legacy clients and servers. See Subjects for further details.",
+ "groupNames": "GroupNames holds all the groups directly bound to the role. This field should only be specified when supporting legacy clients and servers. See Subjects for further details.",
+ "subjects": "Subjects hold object references to authorize with this rule. This field is ignored if UserNames or GroupNames are specified to support legacy clients and servers. Thus newer clients that do not need to support backwards compatibility should send only fully qualified Subjects and should omit the UserNames and GroupNames fields. Clients that need to support backwards compatibility can use this field to build the UserNames and GroupNames.",
+ "roleRef": "RoleRef can only reference the current namespace and the global namespace. If the RoleRef cannot be resolved, the Authorizer must return an error.
Since Policy is a singleton, this is sufficient knowledge to locate a role.", +} + +func (RoleBinding) SwaggerDoc() map[string]string { + return map_RoleBinding +} + +var map_RoleBindingList = map[string]string{ + "": "RoleBindingList is a collection of RoleBindings", + "items": "Items is a list of RoleBindings", +} + +func (RoleBindingList) SwaggerDoc() map[string]string { + return map_RoleBindingList +} + +var map_RoleBindingRestriction = map[string]string{ + "": "RoleBindingRestriction is an object that can be matched against a subject (user, group, or service account) to determine whether rolebindings on that subject are allowed in the namespace to which the RoleBindingRestriction belongs. If any one of those RoleBindingRestriction objects matches a subject, rolebindings on that subject in the namespace are allowed.", + "spec": "Spec defines the matcher.", +} + +func (RoleBindingRestriction) SwaggerDoc() map[string]string { + return map_RoleBindingRestriction +} + +var map_RoleBindingRestrictionList = map[string]string{ + "": "RoleBindingRestrictionList is a collection of RoleBindingRestriction objects.", + "items": "Items is a list of RoleBindingRestriction objects.", +} + +func (RoleBindingRestrictionList) SwaggerDoc() map[string]string { + return map_RoleBindingRestrictionList +} + +var map_RoleBindingRestrictionSpec = map[string]string{ + "": "RoleBindingRestrictionSpec defines a rolebinding restriction. Exactly one field must be non-nil.", + "userrestriction": "UserRestriction matches against user subjects.", + "grouprestriction": "GroupRestriction matches against group subjects.", + "serviceaccountrestriction": "ServiceAccountRestriction matches against service-account subjects.", +} + +func (RoleBindingRestrictionSpec) SwaggerDoc() map[string]string { + return map_RoleBindingRestrictionSpec +} + +var map_RoleList = map[string]string{ + "": "RoleList is a collection of Roles", + "items": "Items is a list of Roles", +} + +func (RoleList) SwaggerDoc() map[string]string { + return map_RoleList +} + +var map_SelfSubjectRulesReview = map[string]string{ + "": "SelfSubjectRulesReview is a resource you can create to determine which actions you can perform in a namespace", + "spec": "Spec adds information about how to conduct the check", + "status": "Status is completed by the server to tell which permissions you have", +} + +func (SelfSubjectRulesReview) SwaggerDoc() map[string]string { + return map_SelfSubjectRulesReview +} + +var map_SelfSubjectRulesReviewSpec = map[string]string{ + "": "SelfSubjectRulesReviewSpec adds information about how to conduct the check", + "scopes": "Scopes to use for the evaluation. Empty means \"use the unscoped (full) permissions of the user/groups\". Nil means \"use the scopes on this request\".", +} + +func (SelfSubjectRulesReviewSpec) SwaggerDoc() map[string]string { + return map_SelfSubjectRulesReviewSpec +} + +var map_ServiceAccountReference = map[string]string{ + "": "ServiceAccountReference specifies a service account and namespace by their names.", + "name": "Name is the name of the service account.", + "namespace": "Namespace is the namespace of the service account. Service accounts from inside the whitelisted namespaces are allowed to be bound to roles. 
If Namespace is empty, then the namespace of the RoleBindingRestriction in which the ServiceAccountReference is embedded is used.", +} + +func (ServiceAccountReference) SwaggerDoc() map[string]string { + return map_ServiceAccountReference +} + +var map_ServiceAccountRestriction = map[string]string{ + "": "ServiceAccountRestriction matches a service account by a string match on either the service-account name or the name of the service account's namespace.", + "serviceaccounts": "ServiceAccounts specifies a list of literal service-account names.", + "namespaces": "Namespaces specifies a list of literal namespace names.", +} + +func (ServiceAccountRestriction) SwaggerDoc() map[string]string { + return map_ServiceAccountRestriction +} + +var map_SubjectAccessReview = map[string]string{ + "": "SubjectAccessReview is an object for requesting information about whether a user or group can perform an action", + "user": "User is optional. If both User and Groups are empty, the current authenticated user is used.", + "groups": "GroupsSlice is optional. Groups is the list of groups to which the User belongs.", + "scopes": "Scopes to use for the evaluation. Empty means \"use the unscoped (full) permissions of the user/groups\". Nil for a self-SAR means \"use the scopes on this request\". Nil for a regular SAR means the same as empty.", +} + +func (SubjectAccessReview) SwaggerDoc() map[string]string { + return map_SubjectAccessReview +} + +var map_SubjectAccessReviewResponse = map[string]string{ + "": "SubjectAccessReviewResponse describes whether or not a user or group can perform an action", + "namespace": "Namespace is the namespace used for the access review", + "allowed": "Allowed is required. True if the action would be allowed, false otherwise.", + "reason": "Reason is optional. It indicates why a request was allowed or denied.", + "evaluationError": "EvaluationError is an indication that some error occurred during the authorization check. It is entirely possible to get an error and be able to continue to determine authorization status in spite of it. This is most common when a bound role is missing, but enough roles are still present and bound to reason about the request.", +} + +func (SubjectAccessReviewResponse) SwaggerDoc() map[string]string { + return map_SubjectAccessReviewResponse +} + +var map_SubjectRulesReview = map[string]string{ + "": "SubjectRulesReview is a resource you can create to determine which actions another user can perform in a namespace", + "spec": "Spec adds information about how to conduct the check", + "status": "Status is completed by the server to tell which permissions you have", +} + +func (SubjectRulesReview) SwaggerDoc() map[string]string { + return map_SubjectRulesReview +} + +var map_SubjectRulesReviewSpec = map[string]string{ + "": "SubjectRulesReviewSpec adds information about how to conduct the check", + "user": "User is optional. At least one of User and Groups must be specified.", + "groups": "Groups is optional. Groups is the list of groups to which the User belongs. At least one of User and Groups must be specified.", + "scopes": "Scopes to use for the evaluation.
Empty means \"use the unscoped (full) permissions of the user/groups\".", +} + +func (SubjectRulesReviewSpec) SwaggerDoc() map[string]string { + return map_SubjectRulesReviewSpec +} + +var map_SubjectRulesReviewStatus = map[string]string{ + "": "SubjectRulesReviewStatus is contains the result of a rules check", + "rules": "Rules is the list of rules (no particular sort) that are allowed for the subject", + "evaluationError": "EvaluationError can appear in combination with Rules. It means some error happened during evaluation that may have prevented additional rules from being populated.", +} + +func (SubjectRulesReviewStatus) SwaggerDoc() map[string]string { + return map_SubjectRulesReviewStatus +} + +var map_UserRestriction = map[string]string{ + "": "UserRestriction matches a user either by a string match on the user name, a string match on the name of a group to which the user belongs, or a label selector applied to the user labels.", + "users": "Users specifies a list of literal user names.", + "groups": "Groups specifies a list of literal group names.", + "labels": "Selectors specifies a list of label selectors over user labels.", +} + +func (UserRestriction) SwaggerDoc() map[string]string { + return map_UserRestriction +} + +// AUTO-GENERATED FUNCTIONS END HERE diff --git a/vendor/github.com/openshift/api/build/OWNERS b/vendor/github.com/openshift/api/build/OWNERS new file mode 100644 index 0000000000000000000000000000000000000000..c1ece8b2136fa1403225d55266da33c80a28610a --- /dev/null +++ b/vendor/github.com/openshift/api/build/OWNERS @@ -0,0 +1,4 @@ +reviewers: + - bparees + - gabemontero + - jim-minter diff --git a/vendor/github.com/openshift/api/build/install.go b/vendor/github.com/openshift/api/build/install.go new file mode 100644 index 0000000000000000000000000000000000000000..87e2c26b0f18dcae773d357bbb47a09edef14139 --- /dev/null +++ b/vendor/github.com/openshift/api/build/install.go @@ -0,0 +1,26 @@ +package build + +import ( + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" + + buildv1 "github.com/openshift/api/build/v1" +) + +const ( + GroupName = "build.openshift.io" +) + +var ( + schemeBuilder = runtime.NewSchemeBuilder(buildv1.Install) + // Install is a function which adds every version of this group to a scheme + Install = schemeBuilder.AddToScheme +) + +func Resource(resource string) schema.GroupResource { + return schema.GroupResource{Group: GroupName, Resource: resource} +} + +func Kind(kind string) schema.GroupKind { + return schema.GroupKind{Group: GroupName, Kind: kind} +} diff --git a/vendor/github.com/openshift/api/build/v1/consts.go b/vendor/github.com/openshift/api/build/v1/consts.go new file mode 100644 index 0000000000000000000000000000000000000000..3eb187df5e8770c907879698a17aab2178e2b951 --- /dev/null +++ b/vendor/github.com/openshift/api/build/v1/consts.go @@ -0,0 +1,192 @@ +package v1 + +// annotations +const ( + // BuildAnnotation is an annotation that identifies a Pod as being for a Build + BuildAnnotation = "openshift.io/build.name" + + // BuildConfigAnnotation is an annotation that identifies the BuildConfig that a Build was created from + BuildConfigAnnotation = "openshift.io/build-config.name" + + // BuildCloneAnnotation is an annotation whose value is the name of the build this build was cloned from + BuildCloneAnnotation = "openshift.io/build.clone-of" + + // BuildNumberAnnotation is an annotation whose value is the sequential number for this Build + BuildNumberAnnotation = "openshift.io/build.number" + + // 
BuildPodNameAnnotation is an annotation whose value is the name of the pod running this build + BuildPodNameAnnotation = "openshift.io/build.pod-name" + + // BuildJenkinsStatusJSONAnnotation is an annotation holding the Jenkins status information + BuildJenkinsStatusJSONAnnotation = "openshift.io/jenkins-status-json" + + // BuildJenkinsLogURLAnnotation is an annotation holding a link to the raw Jenkins build console log + BuildJenkinsLogURLAnnotation = "openshift.io/jenkins-log-url" + + // BuildJenkinsConsoleLogURLAnnotation is an annotation holding a link to the Jenkins build console log (including Jenkins chrome wrappering) + BuildJenkinsConsoleLogURLAnnotation = "openshift.io/jenkins-console-log-url" + + // BuildJenkinsBlueOceanLogURLAnnotation is an annotation holding a link to the Jenkins build console log via the Jenkins BlueOcean UI Plugin + BuildJenkinsBlueOceanLogURLAnnotation = "openshift.io/jenkins-blueocean-log-url" + + // BuildJenkinsBuildURIAnnotation is an annotation holding a link to the Jenkins build + BuildJenkinsBuildURIAnnotation = "openshift.io/jenkins-build-uri" + + // BuildSourceSecretMatchURIAnnotationPrefix is a prefix for annotations on a Secret which indicate a source URI against which the Secret can be used + BuildSourceSecretMatchURIAnnotationPrefix = "build.openshift.io/source-secret-match-uri-" + + // BuildConfigPausedAnnotation is an annotation that marks a BuildConfig as paused. + // New Builds cannot be instantiated from a paused BuildConfig. + BuildConfigPausedAnnotation = "openshift.io/build-config.paused" +) + +// labels +const ( + // BuildConfigLabel is the key of a Build label whose value is the ID of a BuildConfig + // on which the Build is based. NOTE: The value for this label may not contain the entire + // BuildConfig name because it will be truncated to maximum label length. + BuildConfigLabel = "openshift.io/build-config.name" + + // BuildLabel is the key of a Pod label whose value is the Name of a Build which is run. + // NOTE: The value for this label may not contain the entire Build name because it will be + // truncated to maximum label length. + BuildLabel = "openshift.io/build.name" + + // BuildRunPolicyLabel represents the start policy used to start the build. + BuildRunPolicyLabel = "openshift.io/build.start-policy" + + // BuildConfigLabelDeprecated was used as BuildConfigLabel before adding namespaces. + // We keep it for backward compatibility. + BuildConfigLabelDeprecated = "buildconfig" +)
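+// Illustrative sketch, not part of the vendored file: the annotation and
+// label keys above are what other components use to map a running pod back
+// to the Build and BuildConfig that produced it; pod here is assumed to be
+// a *corev1.Pod:
+//
+//	buildName := pod.Annotations[BuildAnnotation]
+//	configName := pod.Labels[BuildConfigLabel] // may be truncated, see above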
+ +const ( + // StatusReasonError is a generic reason for a build error condition. + StatusReasonError StatusReason = "Error" + + // StatusReasonCannotCreateBuildPodSpec is an error condition when the build + // strategy cannot create a build pod spec. + StatusReasonCannotCreateBuildPodSpec StatusReason = "CannotCreateBuildPodSpec" + + // StatusReasonCannotCreateBuildPod is an error condition when a build pod + // cannot be created. + StatusReasonCannotCreateBuildPod StatusReason = "CannotCreateBuildPod" + + // StatusReasonInvalidOutputReference is an error condition when the build + // output is an invalid reference. + StatusReasonInvalidOutputReference StatusReason = "InvalidOutputReference" + + // StatusReasonInvalidImageReference is an error condition when the build + // references an invalid image. + StatusReasonInvalidImageReference StatusReason = "InvalidImageReference" + + // StatusReasonCancelBuildFailed is an error condition when cancelling a build + // fails. + StatusReasonCancelBuildFailed StatusReason = "CancelBuildFailed" + + // StatusReasonBuildPodDeleted is an error condition when the build pod is + // deleted before build completion. + StatusReasonBuildPodDeleted StatusReason = "BuildPodDeleted" + + // StatusReasonExceededRetryTimeout is an error condition when the build has + // not completed and retrying the build times out. + StatusReasonExceededRetryTimeout StatusReason = "ExceededRetryTimeout" + + // StatusReasonMissingPushSecret indicates that the build is missing the required + // secret for pushing the output image. + // The build will stay in the pending state until the secret is created, or the build times out. + StatusReasonMissingPushSecret StatusReason = "MissingPushSecret" + + // StatusReasonPostCommitHookFailed indicates the post-commit hook failed. + StatusReasonPostCommitHookFailed StatusReason = "PostCommitHookFailed" + + // StatusReasonPushImageToRegistryFailed indicates that an image failed to be + // pushed to the registry. + StatusReasonPushImageToRegistryFailed StatusReason = "PushImageToRegistryFailed" + + // StatusReasonPullBuilderImageFailed indicates that we failed to pull the + // builder image. + StatusReasonPullBuilderImageFailed StatusReason = "PullBuilderImageFailed" + + // StatusReasonFetchSourceFailed indicates that fetching the source of the + // build has failed. + StatusReasonFetchSourceFailed StatusReason = "FetchSourceFailed" + + // StatusReasonInvalidContextDirectory indicates that the supplied + // contextDir does not exist + StatusReasonInvalidContextDirectory StatusReason = "InvalidContextDirectory" + + // StatusReasonCancelledBuild indicates that the build was cancelled by the + // user. + StatusReasonCancelledBuild StatusReason = "CancelledBuild" + + // StatusReasonDockerBuildFailed indicates that the container image build strategy has + // failed. + StatusReasonDockerBuildFailed StatusReason = "DockerBuildFailed" + + // StatusReasonBuildPodExists indicates that the build tried to create a + // build pod but one was already present. + StatusReasonBuildPodExists StatusReason = "BuildPodExists" + + // StatusReasonNoBuildContainerStatus indicates that the build failed because the + // build pod has no container statuses. + StatusReasonNoBuildContainerStatus StatusReason = "NoBuildContainerStatus" + + // StatusReasonFailedContainer indicates that the pod for the build has at least + // one container with a non-zero exit status. + StatusReasonFailedContainer StatusReason = "FailedContainer" + + // StatusReasonUnresolvableEnvironmentVariable indicates that an error occurred processing + // the supplied options for environment variables in the build strategy environment + StatusReasonUnresolvableEnvironmentVariable StatusReason = "UnresolvableEnvironmentVariable" + + // StatusReasonGenericBuildFailed is the reason associated with a broad + // range of build failures. + StatusReasonGenericBuildFailed StatusReason = "GenericBuildFailed" + + // StatusReasonOutOfMemoryKilled indicates that the build pod was killed for its memory consumption + StatusReasonOutOfMemoryKilled StatusReason = "OutOfMemoryKilled" +
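+ // Illustrative sketch, not part of the vendored file: a consumer might
+ // branch on these reasons when deciding whether a failed build is worth
+ // retrying; build is assumed to be a *Build:
+ //
+ //	switch build.Status.Reason {
+ //	case StatusReasonOutOfMemoryKilled, StatusReasonBuildPodEvicted:
+ //		// infrastructure-level failure; a retry may succeed elsewhere
+ //	case StatusReasonCancelledBuild:
+ //		// user-initiated; do not retry
+ //	}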
+ // StatusReasonCannotRetrieveServiceAccount is the reason associated with a failure + // to look up the service account associated with the BuildConfig. + StatusReasonCannotRetrieveServiceAccount StatusReason = "CannotRetrieveServiceAccount" + + // StatusReasonBuildPodEvicted is the reason a build fails due to the build pod being evicted + // from its node + StatusReasonBuildPodEvicted StatusReason = "BuildPodEvicted" +) + +// env vars +// WhitelistEnvVarNames is a list of special env vars allowed in s2i containers +var WhitelistEnvVarNames = []string{"BUILD_LOGLEVEL", "GIT_SSL_NO_VERIFY", "HTTP_PROXY", "HTTPS_PROXY", "LANG", "NO_PROXY"} + +// env vars +const ( + + // CustomBuildStrategyBaseImageKey is the environment variable that indicates the base image to be used when + // performing a custom build, if needed. + CustomBuildStrategyBaseImageKey = "OPENSHIFT_CUSTOM_BUILD_BASE_IMAGE" + + // AllowedUIDs is an environment variable that contains ranges of UIDs that are allowed in + // Source builder images + AllowedUIDs = "ALLOWED_UIDS" + // DropCapabilities is an environment variable that contains a list of capabilities to drop when + // executing a Source build + DropCapabilities = "DROP_CAPS" +) + +// keys inside of secrets and configmaps +const ( + // WebHookSecretKey is the key used to identify the value containing the webhook invocation + // secret within a secret referenced by a webhook trigger. + WebHookSecretKey = "WebHookSecretKey" + + // RegistryConfKey is the ConfigMap key for the build pod's registry configuration file. + RegistryConfKey = "registries.conf" + + // SignaturePolicyKey is the ConfigMap key for the build pod's image signature policy file. + SignaturePolicyKey = "policy.json" + + // ServiceCAKey is the ConfigMap key for the service signing certificate authority mounted into build pods. + ServiceCAKey = "service-ca.crt" +) diff --git a/vendor/github.com/openshift/api/build/v1/doc.go b/vendor/github.com/openshift/api/build/v1/doc.go new file mode 100644 index 0000000000000000000000000000000000000000..9bc16f64b2359b76a96b1723af61831fcba6eeeb --- /dev/null +++ b/vendor/github.com/openshift/api/build/v1/doc.go @@ -0,0 +1,8 @@ +// +k8s:deepcopy-gen=package,register +// +k8s:conversion-gen=github.com/openshift/origin/pkg/build/apis/build +// +k8s:defaulter-gen=TypeMeta +// +k8s:openapi-gen=true + +// +groupName=build.openshift.io +// Package v1 is the v1 version of the API. +package v1 diff --git a/vendor/github.com/openshift/api/build/v1/generated.pb.go b/vendor/github.com/openshift/api/build/v1/generated.pb.go new file mode 100644 index 0000000000000000000000000000000000000000..9c3e107082b7384cbca473f6fa4b51ef1e880d82 --- /dev/null +++ b/vendor/github.com/openshift/api/build/v1/generated.pb.go @@ -0,0 +1,16272 @@ +// Code generated by protoc-gen-gogo. DO NOT EDIT. +// source: github.com/openshift/api/build/v1/generated.proto + +package v1 + +import ( + fmt "fmt" + + io "io" + + proto "github.com/gogo/protobuf/proto" + github_com_gogo_protobuf_sortkeys "github.com/gogo/protobuf/sortkeys" + k8s_io_api_core_v1 "k8s.io/api/core/v1" + v11 "k8s.io/api/core/v1" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + + math "math" + math_bits "math/bits" + reflect "reflect" + strings "strings" + time "time" +) + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf +var _ = time.Kitchen + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated.
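+// Illustrative sketch, not part of the generated file: every type in this
+// file implements proto.Message, so values round-trip through the standard
+// gogo/protobuf entry points:
+//
+//	data, err := proto.Marshal(&Build{})
+//	if err != nil { /* handle the error */ }
+//	decoded := &Build{}
+//	err = proto.Unmarshal(data, decoded)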
+const _ = proto.GoGoProtoPackageIsVersion2 // please upgrade the proto package + +func (m *BinaryBuildRequestOptions) Reset() { *m = BinaryBuildRequestOptions{} } +func (*BinaryBuildRequestOptions) ProtoMessage() {} +func (*BinaryBuildRequestOptions) Descriptor() ([]byte, []int) { + return fileDescriptor_2ba579f6f004cb75, []int{0} +} +func (m *BinaryBuildRequestOptions) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *BinaryBuildRequestOptions) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *BinaryBuildRequestOptions) XXX_Merge(src proto.Message) { + xxx_messageInfo_BinaryBuildRequestOptions.Merge(m, src) +} +func (m *BinaryBuildRequestOptions) XXX_Size() int { + return m.Size() +} +func (m *BinaryBuildRequestOptions) XXX_DiscardUnknown() { + xxx_messageInfo_BinaryBuildRequestOptions.DiscardUnknown(m) +} + +var xxx_messageInfo_BinaryBuildRequestOptions proto.InternalMessageInfo + +func (m *BinaryBuildSource) Reset() { *m = BinaryBuildSource{} } +func (*BinaryBuildSource) ProtoMessage() {} +func (*BinaryBuildSource) Descriptor() ([]byte, []int) { + return fileDescriptor_2ba579f6f004cb75, []int{1} +} +func (m *BinaryBuildSource) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *BinaryBuildSource) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *BinaryBuildSource) XXX_Merge(src proto.Message) { + xxx_messageInfo_BinaryBuildSource.Merge(m, src) +} +func (m *BinaryBuildSource) XXX_Size() int { + return m.Size() +} +func (m *BinaryBuildSource) XXX_DiscardUnknown() { + xxx_messageInfo_BinaryBuildSource.DiscardUnknown(m) +} + +var xxx_messageInfo_BinaryBuildSource proto.InternalMessageInfo + +func (m *BitbucketWebHookCause) Reset() { *m = BitbucketWebHookCause{} } +func (*BitbucketWebHookCause) ProtoMessage() {} +func (*BitbucketWebHookCause) Descriptor() ([]byte, []int) { + return fileDescriptor_2ba579f6f004cb75, []int{2} +} +func (m *BitbucketWebHookCause) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *BitbucketWebHookCause) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *BitbucketWebHookCause) XXX_Merge(src proto.Message) { + xxx_messageInfo_BitbucketWebHookCause.Merge(m, src) +} +func (m *BitbucketWebHookCause) XXX_Size() int { + return m.Size() +} +func (m *BitbucketWebHookCause) XXX_DiscardUnknown() { + xxx_messageInfo_BitbucketWebHookCause.DiscardUnknown(m) +} + +var xxx_messageInfo_BitbucketWebHookCause proto.InternalMessageInfo + +func (m *Build) Reset() { *m = Build{} } +func (*Build) ProtoMessage() {} +func (*Build) Descriptor() ([]byte, []int) { + return fileDescriptor_2ba579f6f004cb75, []int{3} +} +func (m *Build) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *Build) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *Build) XXX_Merge(src proto.Message) { + xxx_messageInfo_Build.Merge(m, src) +} +func (m *Build) XXX_Size() int { + return m.Size() +} +func (m *Build) XXX_DiscardUnknown() { + xxx_messageInfo_Build.DiscardUnknown(m) +} + +var 
xxx_messageInfo_Build proto.InternalMessageInfo + +func (m *BuildCondition) Reset() { *m = BuildCondition{} } +func (*BuildCondition) ProtoMessage() {} +func (*BuildCondition) Descriptor() ([]byte, []int) { + return fileDescriptor_2ba579f6f004cb75, []int{4} +} +func (m *BuildCondition) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *BuildCondition) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *BuildCondition) XXX_Merge(src proto.Message) { + xxx_messageInfo_BuildCondition.Merge(m, src) +} +func (m *BuildCondition) XXX_Size() int { + return m.Size() +} +func (m *BuildCondition) XXX_DiscardUnknown() { + xxx_messageInfo_BuildCondition.DiscardUnknown(m) +} + +var xxx_messageInfo_BuildCondition proto.InternalMessageInfo + +func (m *BuildConfig) Reset() { *m = BuildConfig{} } +func (*BuildConfig) ProtoMessage() {} +func (*BuildConfig) Descriptor() ([]byte, []int) { + return fileDescriptor_2ba579f6f004cb75, []int{5} +} +func (m *BuildConfig) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *BuildConfig) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *BuildConfig) XXX_Merge(src proto.Message) { + xxx_messageInfo_BuildConfig.Merge(m, src) +} +func (m *BuildConfig) XXX_Size() int { + return m.Size() +} +func (m *BuildConfig) XXX_DiscardUnknown() { + xxx_messageInfo_BuildConfig.DiscardUnknown(m) +} + +var xxx_messageInfo_BuildConfig proto.InternalMessageInfo + +func (m *BuildConfigList) Reset() { *m = BuildConfigList{} } +func (*BuildConfigList) ProtoMessage() {} +func (*BuildConfigList) Descriptor() ([]byte, []int) { + return fileDescriptor_2ba579f6f004cb75, []int{6} +} +func (m *BuildConfigList) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *BuildConfigList) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *BuildConfigList) XXX_Merge(src proto.Message) { + xxx_messageInfo_BuildConfigList.Merge(m, src) +} +func (m *BuildConfigList) XXX_Size() int { + return m.Size() +} +func (m *BuildConfigList) XXX_DiscardUnknown() { + xxx_messageInfo_BuildConfigList.DiscardUnknown(m) +} + +var xxx_messageInfo_BuildConfigList proto.InternalMessageInfo + +func (m *BuildConfigSpec) Reset() { *m = BuildConfigSpec{} } +func (*BuildConfigSpec) ProtoMessage() {} +func (*BuildConfigSpec) Descriptor() ([]byte, []int) { + return fileDescriptor_2ba579f6f004cb75, []int{7} +} +func (m *BuildConfigSpec) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *BuildConfigSpec) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *BuildConfigSpec) XXX_Merge(src proto.Message) { + xxx_messageInfo_BuildConfigSpec.Merge(m, src) +} +func (m *BuildConfigSpec) XXX_Size() int { + return m.Size() +} +func (m *BuildConfigSpec) XXX_DiscardUnknown() { + xxx_messageInfo_BuildConfigSpec.DiscardUnknown(m) +} + +var xxx_messageInfo_BuildConfigSpec proto.InternalMessageInfo + +func (m *BuildConfigStatus) Reset() { *m = BuildConfigStatus{} } +func (*BuildConfigStatus) ProtoMessage() {} +func (*BuildConfigStatus) Descriptor() 
([]byte, []int) { + return fileDescriptor_2ba579f6f004cb75, []int{8} +} +func (m *BuildConfigStatus) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *BuildConfigStatus) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *BuildConfigStatus) XXX_Merge(src proto.Message) { + xxx_messageInfo_BuildConfigStatus.Merge(m, src) +} +func (m *BuildConfigStatus) XXX_Size() int { + return m.Size() +} +func (m *BuildConfigStatus) XXX_DiscardUnknown() { + xxx_messageInfo_BuildConfigStatus.DiscardUnknown(m) +} + +var xxx_messageInfo_BuildConfigStatus proto.InternalMessageInfo + +func (m *BuildList) Reset() { *m = BuildList{} } +func (*BuildList) ProtoMessage() {} +func (*BuildList) Descriptor() ([]byte, []int) { + return fileDescriptor_2ba579f6f004cb75, []int{9} +} +func (m *BuildList) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *BuildList) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *BuildList) XXX_Merge(src proto.Message) { + xxx_messageInfo_BuildList.Merge(m, src) +} +func (m *BuildList) XXX_Size() int { + return m.Size() +} +func (m *BuildList) XXX_DiscardUnknown() { + xxx_messageInfo_BuildList.DiscardUnknown(m) +} + +var xxx_messageInfo_BuildList proto.InternalMessageInfo + +func (m *BuildLog) Reset() { *m = BuildLog{} } +func (*BuildLog) ProtoMessage() {} +func (*BuildLog) Descriptor() ([]byte, []int) { + return fileDescriptor_2ba579f6f004cb75, []int{10} +} +func (m *BuildLog) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *BuildLog) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *BuildLog) XXX_Merge(src proto.Message) { + xxx_messageInfo_BuildLog.Merge(m, src) +} +func (m *BuildLog) XXX_Size() int { + return m.Size() +} +func (m *BuildLog) XXX_DiscardUnknown() { + xxx_messageInfo_BuildLog.DiscardUnknown(m) +} + +var xxx_messageInfo_BuildLog proto.InternalMessageInfo + +func (m *BuildLogOptions) Reset() { *m = BuildLogOptions{} } +func (*BuildLogOptions) ProtoMessage() {} +func (*BuildLogOptions) Descriptor() ([]byte, []int) { + return fileDescriptor_2ba579f6f004cb75, []int{11} +} +func (m *BuildLogOptions) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *BuildLogOptions) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *BuildLogOptions) XXX_Merge(src proto.Message) { + xxx_messageInfo_BuildLogOptions.Merge(m, src) +} +func (m *BuildLogOptions) XXX_Size() int { + return m.Size() +} +func (m *BuildLogOptions) XXX_DiscardUnknown() { + xxx_messageInfo_BuildLogOptions.DiscardUnknown(m) +} + +var xxx_messageInfo_BuildLogOptions proto.InternalMessageInfo + +func (m *BuildOutput) Reset() { *m = BuildOutput{} } +func (*BuildOutput) ProtoMessage() {} +func (*BuildOutput) Descriptor() ([]byte, []int) { + return fileDescriptor_2ba579f6f004cb75, []int{12} +} +func (m *BuildOutput) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *BuildOutput) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if 
err != nil { + return nil, err + } + return b[:n], nil +} +func (m *BuildOutput) XXX_Merge(src proto.Message) { + xxx_messageInfo_BuildOutput.Merge(m, src) +} +func (m *BuildOutput) XXX_Size() int { + return m.Size() +} +func (m *BuildOutput) XXX_DiscardUnknown() { + xxx_messageInfo_BuildOutput.DiscardUnknown(m) +} + +var xxx_messageInfo_BuildOutput proto.InternalMessageInfo + +func (m *BuildPostCommitSpec) Reset() { *m = BuildPostCommitSpec{} } +func (*BuildPostCommitSpec) ProtoMessage() {} +func (*BuildPostCommitSpec) Descriptor() ([]byte, []int) { + return fileDescriptor_2ba579f6f004cb75, []int{13} +} +func (m *BuildPostCommitSpec) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *BuildPostCommitSpec) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *BuildPostCommitSpec) XXX_Merge(src proto.Message) { + xxx_messageInfo_BuildPostCommitSpec.Merge(m, src) +} +func (m *BuildPostCommitSpec) XXX_Size() int { + return m.Size() +} +func (m *BuildPostCommitSpec) XXX_DiscardUnknown() { + xxx_messageInfo_BuildPostCommitSpec.DiscardUnknown(m) +} + +var xxx_messageInfo_BuildPostCommitSpec proto.InternalMessageInfo + +func (m *BuildRequest) Reset() { *m = BuildRequest{} } +func (*BuildRequest) ProtoMessage() {} +func (*BuildRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_2ba579f6f004cb75, []int{14} +} +func (m *BuildRequest) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *BuildRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *BuildRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_BuildRequest.Merge(m, src) +} +func (m *BuildRequest) XXX_Size() int { + return m.Size() +} +func (m *BuildRequest) XXX_DiscardUnknown() { + xxx_messageInfo_BuildRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_BuildRequest proto.InternalMessageInfo + +func (m *BuildSource) Reset() { *m = BuildSource{} } +func (*BuildSource) ProtoMessage() {} +func (*BuildSource) Descriptor() ([]byte, []int) { + return fileDescriptor_2ba579f6f004cb75, []int{15} +} +func (m *BuildSource) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *BuildSource) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *BuildSource) XXX_Merge(src proto.Message) { + xxx_messageInfo_BuildSource.Merge(m, src) +} +func (m *BuildSource) XXX_Size() int { + return m.Size() +} +func (m *BuildSource) XXX_DiscardUnknown() { + xxx_messageInfo_BuildSource.DiscardUnknown(m) +} + +var xxx_messageInfo_BuildSource proto.InternalMessageInfo + +func (m *BuildSpec) Reset() { *m = BuildSpec{} } +func (*BuildSpec) ProtoMessage() {} +func (*BuildSpec) Descriptor() ([]byte, []int) { + return fileDescriptor_2ba579f6f004cb75, []int{16} +} +func (m *BuildSpec) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *BuildSpec) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *BuildSpec) XXX_Merge(src proto.Message) { + xxx_messageInfo_BuildSpec.Merge(m, src) +} +func (m *BuildSpec) XXX_Size() int { + return m.Size() +} +func (m *BuildSpec) 
XXX_DiscardUnknown() { + xxx_messageInfo_BuildSpec.DiscardUnknown(m) +} + +var xxx_messageInfo_BuildSpec proto.InternalMessageInfo + +func (m *BuildStatus) Reset() { *m = BuildStatus{} } +func (*BuildStatus) ProtoMessage() {} +func (*BuildStatus) Descriptor() ([]byte, []int) { + return fileDescriptor_2ba579f6f004cb75, []int{17} +} +func (m *BuildStatus) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *BuildStatus) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *BuildStatus) XXX_Merge(src proto.Message) { + xxx_messageInfo_BuildStatus.Merge(m, src) +} +func (m *BuildStatus) XXX_Size() int { + return m.Size() +} +func (m *BuildStatus) XXX_DiscardUnknown() { + xxx_messageInfo_BuildStatus.DiscardUnknown(m) +} + +var xxx_messageInfo_BuildStatus proto.InternalMessageInfo + +func (m *BuildStatusOutput) Reset() { *m = BuildStatusOutput{} } +func (*BuildStatusOutput) ProtoMessage() {} +func (*BuildStatusOutput) Descriptor() ([]byte, []int) { + return fileDescriptor_2ba579f6f004cb75, []int{18} +} +func (m *BuildStatusOutput) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *BuildStatusOutput) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *BuildStatusOutput) XXX_Merge(src proto.Message) { + xxx_messageInfo_BuildStatusOutput.Merge(m, src) +} +func (m *BuildStatusOutput) XXX_Size() int { + return m.Size() +} +func (m *BuildStatusOutput) XXX_DiscardUnknown() { + xxx_messageInfo_BuildStatusOutput.DiscardUnknown(m) +} + +var xxx_messageInfo_BuildStatusOutput proto.InternalMessageInfo + +func (m *BuildStatusOutputTo) Reset() { *m = BuildStatusOutputTo{} } +func (*BuildStatusOutputTo) ProtoMessage() {} +func (*BuildStatusOutputTo) Descriptor() ([]byte, []int) { + return fileDescriptor_2ba579f6f004cb75, []int{19} +} +func (m *BuildStatusOutputTo) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *BuildStatusOutputTo) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *BuildStatusOutputTo) XXX_Merge(src proto.Message) { + xxx_messageInfo_BuildStatusOutputTo.Merge(m, src) +} +func (m *BuildStatusOutputTo) XXX_Size() int { + return m.Size() +} +func (m *BuildStatusOutputTo) XXX_DiscardUnknown() { + xxx_messageInfo_BuildStatusOutputTo.DiscardUnknown(m) +} + +var xxx_messageInfo_BuildStatusOutputTo proto.InternalMessageInfo + +func (m *BuildStrategy) Reset() { *m = BuildStrategy{} } +func (*BuildStrategy) ProtoMessage() {} +func (*BuildStrategy) Descriptor() ([]byte, []int) { + return fileDescriptor_2ba579f6f004cb75, []int{20} +} +func (m *BuildStrategy) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *BuildStrategy) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *BuildStrategy) XXX_Merge(src proto.Message) { + xxx_messageInfo_BuildStrategy.Merge(m, src) +} +func (m *BuildStrategy) XXX_Size() int { + return m.Size() +} +func (m *BuildStrategy) XXX_DiscardUnknown() { + xxx_messageInfo_BuildStrategy.DiscardUnknown(m) +} + +var xxx_messageInfo_BuildStrategy proto.InternalMessageInfo + +func (m 
*BuildTriggerCause) Reset() { *m = BuildTriggerCause{} } +func (*BuildTriggerCause) ProtoMessage() {} +func (*BuildTriggerCause) Descriptor() ([]byte, []int) { + return fileDescriptor_2ba579f6f004cb75, []int{21} +} +func (m *BuildTriggerCause) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *BuildTriggerCause) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *BuildTriggerCause) XXX_Merge(src proto.Message) { + xxx_messageInfo_BuildTriggerCause.Merge(m, src) +} +func (m *BuildTriggerCause) XXX_Size() int { + return m.Size() +} +func (m *BuildTriggerCause) XXX_DiscardUnknown() { + xxx_messageInfo_BuildTriggerCause.DiscardUnknown(m) +} + +var xxx_messageInfo_BuildTriggerCause proto.InternalMessageInfo + +func (m *BuildTriggerPolicy) Reset() { *m = BuildTriggerPolicy{} } +func (*BuildTriggerPolicy) ProtoMessage() {} +func (*BuildTriggerPolicy) Descriptor() ([]byte, []int) { + return fileDescriptor_2ba579f6f004cb75, []int{22} +} +func (m *BuildTriggerPolicy) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *BuildTriggerPolicy) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *BuildTriggerPolicy) XXX_Merge(src proto.Message) { + xxx_messageInfo_BuildTriggerPolicy.Merge(m, src) +} +func (m *BuildTriggerPolicy) XXX_Size() int { + return m.Size() +} +func (m *BuildTriggerPolicy) XXX_DiscardUnknown() { + xxx_messageInfo_BuildTriggerPolicy.DiscardUnknown(m) +} + +var xxx_messageInfo_BuildTriggerPolicy proto.InternalMessageInfo + +func (m *CommonSpec) Reset() { *m = CommonSpec{} } +func (*CommonSpec) ProtoMessage() {} +func (*CommonSpec) Descriptor() ([]byte, []int) { + return fileDescriptor_2ba579f6f004cb75, []int{23} +} +func (m *CommonSpec) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *CommonSpec) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *CommonSpec) XXX_Merge(src proto.Message) { + xxx_messageInfo_CommonSpec.Merge(m, src) +} +func (m *CommonSpec) XXX_Size() int { + return m.Size() +} +func (m *CommonSpec) XXX_DiscardUnknown() { + xxx_messageInfo_CommonSpec.DiscardUnknown(m) +} + +var xxx_messageInfo_CommonSpec proto.InternalMessageInfo + +func (m *CommonWebHookCause) Reset() { *m = CommonWebHookCause{} } +func (*CommonWebHookCause) ProtoMessage() {} +func (*CommonWebHookCause) Descriptor() ([]byte, []int) { + return fileDescriptor_2ba579f6f004cb75, []int{24} +} +func (m *CommonWebHookCause) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *CommonWebHookCause) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *CommonWebHookCause) XXX_Merge(src proto.Message) { + xxx_messageInfo_CommonWebHookCause.Merge(m, src) +} +func (m *CommonWebHookCause) XXX_Size() int { + return m.Size() +} +func (m *CommonWebHookCause) XXX_DiscardUnknown() { + xxx_messageInfo_CommonWebHookCause.DiscardUnknown(m) +} + +var xxx_messageInfo_CommonWebHookCause proto.InternalMessageInfo + +func (m *ConfigMapBuildSource) Reset() { *m = ConfigMapBuildSource{} } +func (*ConfigMapBuildSource) 
ProtoMessage() {} +func (*ConfigMapBuildSource) Descriptor() ([]byte, []int) { + return fileDescriptor_2ba579f6f004cb75, []int{25} +} +func (m *ConfigMapBuildSource) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ConfigMapBuildSource) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *ConfigMapBuildSource) XXX_Merge(src proto.Message) { + xxx_messageInfo_ConfigMapBuildSource.Merge(m, src) +} +func (m *ConfigMapBuildSource) XXX_Size() int { + return m.Size() +} +func (m *ConfigMapBuildSource) XXX_DiscardUnknown() { + xxx_messageInfo_ConfigMapBuildSource.DiscardUnknown(m) +} + +var xxx_messageInfo_ConfigMapBuildSource proto.InternalMessageInfo + +func (m *CustomBuildStrategy) Reset() { *m = CustomBuildStrategy{} } +func (*CustomBuildStrategy) ProtoMessage() {} +func (*CustomBuildStrategy) Descriptor() ([]byte, []int) { + return fileDescriptor_2ba579f6f004cb75, []int{26} +} +func (m *CustomBuildStrategy) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *CustomBuildStrategy) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *CustomBuildStrategy) XXX_Merge(src proto.Message) { + xxx_messageInfo_CustomBuildStrategy.Merge(m, src) +} +func (m *CustomBuildStrategy) XXX_Size() int { + return m.Size() +} +func (m *CustomBuildStrategy) XXX_DiscardUnknown() { + xxx_messageInfo_CustomBuildStrategy.DiscardUnknown(m) +} + +var xxx_messageInfo_CustomBuildStrategy proto.InternalMessageInfo + +func (m *DockerBuildStrategy) Reset() { *m = DockerBuildStrategy{} } +func (*DockerBuildStrategy) ProtoMessage() {} +func (*DockerBuildStrategy) Descriptor() ([]byte, []int) { + return fileDescriptor_2ba579f6f004cb75, []int{27} +} +func (m *DockerBuildStrategy) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *DockerBuildStrategy) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *DockerBuildStrategy) XXX_Merge(src proto.Message) { + xxx_messageInfo_DockerBuildStrategy.Merge(m, src) +} +func (m *DockerBuildStrategy) XXX_Size() int { + return m.Size() +} +func (m *DockerBuildStrategy) XXX_DiscardUnknown() { + xxx_messageInfo_DockerBuildStrategy.DiscardUnknown(m) +} + +var xxx_messageInfo_DockerBuildStrategy proto.InternalMessageInfo + +func (m *DockerStrategyOptions) Reset() { *m = DockerStrategyOptions{} } +func (*DockerStrategyOptions) ProtoMessage() {} +func (*DockerStrategyOptions) Descriptor() ([]byte, []int) { + return fileDescriptor_2ba579f6f004cb75, []int{28} +} +func (m *DockerStrategyOptions) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *DockerStrategyOptions) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *DockerStrategyOptions) XXX_Merge(src proto.Message) { + xxx_messageInfo_DockerStrategyOptions.Merge(m, src) +} +func (m *DockerStrategyOptions) XXX_Size() int { + return m.Size() +} +func (m *DockerStrategyOptions) XXX_DiscardUnknown() { + xxx_messageInfo_DockerStrategyOptions.DiscardUnknown(m) +} + +var xxx_messageInfo_DockerStrategyOptions proto.InternalMessageInfo + +func (m 
*GenericWebHookCause) Reset() { *m = GenericWebHookCause{} } +func (*GenericWebHookCause) ProtoMessage() {} +func (*GenericWebHookCause) Descriptor() ([]byte, []int) { + return fileDescriptor_2ba579f6f004cb75, []int{29} +} +func (m *GenericWebHookCause) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *GenericWebHookCause) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *GenericWebHookCause) XXX_Merge(src proto.Message) { + xxx_messageInfo_GenericWebHookCause.Merge(m, src) +} +func (m *GenericWebHookCause) XXX_Size() int { + return m.Size() +} +func (m *GenericWebHookCause) XXX_DiscardUnknown() { + xxx_messageInfo_GenericWebHookCause.DiscardUnknown(m) +} + +var xxx_messageInfo_GenericWebHookCause proto.InternalMessageInfo + +func (m *GenericWebHookEvent) Reset() { *m = GenericWebHookEvent{} } +func (*GenericWebHookEvent) ProtoMessage() {} +func (*GenericWebHookEvent) Descriptor() ([]byte, []int) { + return fileDescriptor_2ba579f6f004cb75, []int{30} +} +func (m *GenericWebHookEvent) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *GenericWebHookEvent) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *GenericWebHookEvent) XXX_Merge(src proto.Message) { + xxx_messageInfo_GenericWebHookEvent.Merge(m, src) +} +func (m *GenericWebHookEvent) XXX_Size() int { + return m.Size() +} +func (m *GenericWebHookEvent) XXX_DiscardUnknown() { + xxx_messageInfo_GenericWebHookEvent.DiscardUnknown(m) +} + +var xxx_messageInfo_GenericWebHookEvent proto.InternalMessageInfo + +func (m *GitBuildSource) Reset() { *m = GitBuildSource{} } +func (*GitBuildSource) ProtoMessage() {} +func (*GitBuildSource) Descriptor() ([]byte, []int) { + return fileDescriptor_2ba579f6f004cb75, []int{31} +} +func (m *GitBuildSource) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *GitBuildSource) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *GitBuildSource) XXX_Merge(src proto.Message) { + xxx_messageInfo_GitBuildSource.Merge(m, src) +} +func (m *GitBuildSource) XXX_Size() int { + return m.Size() +} +func (m *GitBuildSource) XXX_DiscardUnknown() { + xxx_messageInfo_GitBuildSource.DiscardUnknown(m) +} + +var xxx_messageInfo_GitBuildSource proto.InternalMessageInfo + +func (m *GitHubWebHookCause) Reset() { *m = GitHubWebHookCause{} } +func (*GitHubWebHookCause) ProtoMessage() {} +func (*GitHubWebHookCause) Descriptor() ([]byte, []int) { + return fileDescriptor_2ba579f6f004cb75, []int{32} +} +func (m *GitHubWebHookCause) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *GitHubWebHookCause) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *GitHubWebHookCause) XXX_Merge(src proto.Message) { + xxx_messageInfo_GitHubWebHookCause.Merge(m, src) +} +func (m *GitHubWebHookCause) XXX_Size() int { + return m.Size() +} +func (m *GitHubWebHookCause) XXX_DiscardUnknown() { + xxx_messageInfo_GitHubWebHookCause.DiscardUnknown(m) +} + +var xxx_messageInfo_GitHubWebHookCause proto.InternalMessageInfo + +func (m *GitInfo) Reset() { *m 
= GitInfo{} } +func (*GitInfo) ProtoMessage() {} +func (*GitInfo) Descriptor() ([]byte, []int) { + return fileDescriptor_2ba579f6f004cb75, []int{33} +} +func (m *GitInfo) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *GitInfo) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *GitInfo) XXX_Merge(src proto.Message) { + xxx_messageInfo_GitInfo.Merge(m, src) +} +func (m *GitInfo) XXX_Size() int { + return m.Size() +} +func (m *GitInfo) XXX_DiscardUnknown() { + xxx_messageInfo_GitInfo.DiscardUnknown(m) +} + +var xxx_messageInfo_GitInfo proto.InternalMessageInfo + +func (m *GitLabWebHookCause) Reset() { *m = GitLabWebHookCause{} } +func (*GitLabWebHookCause) ProtoMessage() {} +func (*GitLabWebHookCause) Descriptor() ([]byte, []int) { + return fileDescriptor_2ba579f6f004cb75, []int{34} +} +func (m *GitLabWebHookCause) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *GitLabWebHookCause) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *GitLabWebHookCause) XXX_Merge(src proto.Message) { + xxx_messageInfo_GitLabWebHookCause.Merge(m, src) +} +func (m *GitLabWebHookCause) XXX_Size() int { + return m.Size() +} +func (m *GitLabWebHookCause) XXX_DiscardUnknown() { + xxx_messageInfo_GitLabWebHookCause.DiscardUnknown(m) +} + +var xxx_messageInfo_GitLabWebHookCause proto.InternalMessageInfo + +func (m *GitRefInfo) Reset() { *m = GitRefInfo{} } +func (*GitRefInfo) ProtoMessage() {} +func (*GitRefInfo) Descriptor() ([]byte, []int) { + return fileDescriptor_2ba579f6f004cb75, []int{35} +} +func (m *GitRefInfo) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *GitRefInfo) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *GitRefInfo) XXX_Merge(src proto.Message) { + xxx_messageInfo_GitRefInfo.Merge(m, src) +} +func (m *GitRefInfo) XXX_Size() int { + return m.Size() +} +func (m *GitRefInfo) XXX_DiscardUnknown() { + xxx_messageInfo_GitRefInfo.DiscardUnknown(m) +} + +var xxx_messageInfo_GitRefInfo proto.InternalMessageInfo + +func (m *GitSourceRevision) Reset() { *m = GitSourceRevision{} } +func (*GitSourceRevision) ProtoMessage() {} +func (*GitSourceRevision) Descriptor() ([]byte, []int) { + return fileDescriptor_2ba579f6f004cb75, []int{36} +} +func (m *GitSourceRevision) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *GitSourceRevision) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *GitSourceRevision) XXX_Merge(src proto.Message) { + xxx_messageInfo_GitSourceRevision.Merge(m, src) +} +func (m *GitSourceRevision) XXX_Size() int { + return m.Size() +} +func (m *GitSourceRevision) XXX_DiscardUnknown() { + xxx_messageInfo_GitSourceRevision.DiscardUnknown(m) +} + +var xxx_messageInfo_GitSourceRevision proto.InternalMessageInfo + +func (m *ImageChangeCause) Reset() { *m = ImageChangeCause{} } +func (*ImageChangeCause) ProtoMessage() {} +func (*ImageChangeCause) Descriptor() ([]byte, []int) { + return fileDescriptor_2ba579f6f004cb75, []int{37} +} +func (m *ImageChangeCause) XXX_Unmarshal(b 
[]byte) error { + return m.Unmarshal(b) +} +func (m *ImageChangeCause) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *ImageChangeCause) XXX_Merge(src proto.Message) { + xxx_messageInfo_ImageChangeCause.Merge(m, src) +} +func (m *ImageChangeCause) XXX_Size() int { + return m.Size() +} +func (m *ImageChangeCause) XXX_DiscardUnknown() { + xxx_messageInfo_ImageChangeCause.DiscardUnknown(m) +} + +var xxx_messageInfo_ImageChangeCause proto.InternalMessageInfo + +func (m *ImageChangeTrigger) Reset() { *m = ImageChangeTrigger{} } +func (*ImageChangeTrigger) ProtoMessage() {} +func (*ImageChangeTrigger) Descriptor() ([]byte, []int) { + return fileDescriptor_2ba579f6f004cb75, []int{38} +} +func (m *ImageChangeTrigger) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ImageChangeTrigger) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *ImageChangeTrigger) XXX_Merge(src proto.Message) { + xxx_messageInfo_ImageChangeTrigger.Merge(m, src) +} +func (m *ImageChangeTrigger) XXX_Size() int { + return m.Size() +} +func (m *ImageChangeTrigger) XXX_DiscardUnknown() { + xxx_messageInfo_ImageChangeTrigger.DiscardUnknown(m) +} + +var xxx_messageInfo_ImageChangeTrigger proto.InternalMessageInfo + +func (m *ImageLabel) Reset() { *m = ImageLabel{} } +func (*ImageLabel) ProtoMessage() {} +func (*ImageLabel) Descriptor() ([]byte, []int) { + return fileDescriptor_2ba579f6f004cb75, []int{39} +} +func (m *ImageLabel) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ImageLabel) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *ImageLabel) XXX_Merge(src proto.Message) { + xxx_messageInfo_ImageLabel.Merge(m, src) +} +func (m *ImageLabel) XXX_Size() int { + return m.Size() +} +func (m *ImageLabel) XXX_DiscardUnknown() { + xxx_messageInfo_ImageLabel.DiscardUnknown(m) +} + +var xxx_messageInfo_ImageLabel proto.InternalMessageInfo + +func (m *ImageSource) Reset() { *m = ImageSource{} } +func (*ImageSource) ProtoMessage() {} +func (*ImageSource) Descriptor() ([]byte, []int) { + return fileDescriptor_2ba579f6f004cb75, []int{40} +} +func (m *ImageSource) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ImageSource) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *ImageSource) XXX_Merge(src proto.Message) { + xxx_messageInfo_ImageSource.Merge(m, src) +} +func (m *ImageSource) XXX_Size() int { + return m.Size() +} +func (m *ImageSource) XXX_DiscardUnknown() { + xxx_messageInfo_ImageSource.DiscardUnknown(m) +} + +var xxx_messageInfo_ImageSource proto.InternalMessageInfo + +func (m *ImageSourcePath) Reset() { *m = ImageSourcePath{} } +func (*ImageSourcePath) ProtoMessage() {} +func (*ImageSourcePath) Descriptor() ([]byte, []int) { + return fileDescriptor_2ba579f6f004cb75, []int{41} +} +func (m *ImageSourcePath) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ImageSourcePath) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + 
return nil, err + } + return b[:n], nil +} +func (m *ImageSourcePath) XXX_Merge(src proto.Message) { + xxx_messageInfo_ImageSourcePath.Merge(m, src) +} +func (m *ImageSourcePath) XXX_Size() int { + return m.Size() +} +func (m *ImageSourcePath) XXX_DiscardUnknown() { + xxx_messageInfo_ImageSourcePath.DiscardUnknown(m) +} + +var xxx_messageInfo_ImageSourcePath proto.InternalMessageInfo + +func (m *JenkinsPipelineBuildStrategy) Reset() { *m = JenkinsPipelineBuildStrategy{} } +func (*JenkinsPipelineBuildStrategy) ProtoMessage() {} +func (*JenkinsPipelineBuildStrategy) Descriptor() ([]byte, []int) { + return fileDescriptor_2ba579f6f004cb75, []int{42} +} +func (m *JenkinsPipelineBuildStrategy) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *JenkinsPipelineBuildStrategy) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *JenkinsPipelineBuildStrategy) XXX_Merge(src proto.Message) { + xxx_messageInfo_JenkinsPipelineBuildStrategy.Merge(m, src) +} +func (m *JenkinsPipelineBuildStrategy) XXX_Size() int { + return m.Size() +} +func (m *JenkinsPipelineBuildStrategy) XXX_DiscardUnknown() { + xxx_messageInfo_JenkinsPipelineBuildStrategy.DiscardUnknown(m) +} + +var xxx_messageInfo_JenkinsPipelineBuildStrategy proto.InternalMessageInfo + +func (m *OptionalNodeSelector) Reset() { *m = OptionalNodeSelector{} } +func (*OptionalNodeSelector) ProtoMessage() {} +func (*OptionalNodeSelector) Descriptor() ([]byte, []int) { + return fileDescriptor_2ba579f6f004cb75, []int{43} +} +func (m *OptionalNodeSelector) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *OptionalNodeSelector) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *OptionalNodeSelector) XXX_Merge(src proto.Message) { + xxx_messageInfo_OptionalNodeSelector.Merge(m, src) +} +func (m *OptionalNodeSelector) XXX_Size() int { + return m.Size() +} +func (m *OptionalNodeSelector) XXX_DiscardUnknown() { + xxx_messageInfo_OptionalNodeSelector.DiscardUnknown(m) +} + +var xxx_messageInfo_OptionalNodeSelector proto.InternalMessageInfo + +func (m *ProxyConfig) Reset() { *m = ProxyConfig{} } +func (*ProxyConfig) ProtoMessage() {} +func (*ProxyConfig) Descriptor() ([]byte, []int) { + return fileDescriptor_2ba579f6f004cb75, []int{44} +} +func (m *ProxyConfig) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ProxyConfig) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *ProxyConfig) XXX_Merge(src proto.Message) { + xxx_messageInfo_ProxyConfig.Merge(m, src) +} +func (m *ProxyConfig) XXX_Size() int { + return m.Size() +} +func (m *ProxyConfig) XXX_DiscardUnknown() { + xxx_messageInfo_ProxyConfig.DiscardUnknown(m) +} + +var xxx_messageInfo_ProxyConfig proto.InternalMessageInfo + +func (m *SecretBuildSource) Reset() { *m = SecretBuildSource{} } +func (*SecretBuildSource) ProtoMessage() {} +func (*SecretBuildSource) Descriptor() ([]byte, []int) { + return fileDescriptor_2ba579f6f004cb75, []int{45} +} +func (m *SecretBuildSource) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *SecretBuildSource) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := 
+func (m *SecretBuildSource) Reset() { *m = SecretBuildSource{} }
+func (*SecretBuildSource) ProtoMessage() {}
+func (*SecretBuildSource) Descriptor() ([]byte, []int) {
+	return fileDescriptor_2ba579f6f004cb75, []int{45}
+}
+func (m *SecretBuildSource) XXX_Unmarshal(b []byte) error {
+	return m.Unmarshal(b)
+}
+func (m *SecretBuildSource) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	b = b[:cap(b)]
+	n, err := m.MarshalToSizedBuffer(b)
+	if err != nil {
+		return nil, err
+	}
+	return b[:n], nil
+}
+func (m *SecretBuildSource) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_SecretBuildSource.Merge(m, src)
+}
+func (m *SecretBuildSource) XXX_Size() int {
+	return m.Size()
+}
+func (m *SecretBuildSource) XXX_DiscardUnknown() {
+	xxx_messageInfo_SecretBuildSource.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_SecretBuildSource proto.InternalMessageInfo
+
+func (m *SecretLocalReference) Reset() { *m = SecretLocalReference{} }
+func (*SecretLocalReference) ProtoMessage() {}
+func (*SecretLocalReference) Descriptor() ([]byte, []int) {
+	return fileDescriptor_2ba579f6f004cb75, []int{46}
+}
+func (m *SecretLocalReference) XXX_Unmarshal(b []byte) error {
+	return m.Unmarshal(b)
+}
+func (m *SecretLocalReference) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	b = b[:cap(b)]
+	n, err := m.MarshalToSizedBuffer(b)
+	if err != nil {
+		return nil, err
+	}
+	return b[:n], nil
+}
+func (m *SecretLocalReference) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_SecretLocalReference.Merge(m, src)
+}
+func (m *SecretLocalReference) XXX_Size() int {
+	return m.Size()
+}
+func (m *SecretLocalReference) XXX_DiscardUnknown() {
+	xxx_messageInfo_SecretLocalReference.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_SecretLocalReference proto.InternalMessageInfo
+
+func (m *SecretSpec) Reset() { *m = SecretSpec{} }
+func (*SecretSpec) ProtoMessage() {}
+func (*SecretSpec) Descriptor() ([]byte, []int) {
+	return fileDescriptor_2ba579f6f004cb75, []int{47}
+}
+func (m *SecretSpec) XXX_Unmarshal(b []byte) error {
+	return m.Unmarshal(b)
+}
+func (m *SecretSpec) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	b = b[:cap(b)]
+	n, err := m.MarshalToSizedBuffer(b)
+	if err != nil {
+		return nil, err
+	}
+	return b[:n], nil
+}
+func (m *SecretSpec) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_SecretSpec.Merge(m, src)
+}
+func (m *SecretSpec) XXX_Size() int {
+	return m.Size()
+}
+func (m *SecretSpec) XXX_DiscardUnknown() {
+	xxx_messageInfo_SecretSpec.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_SecretSpec proto.InternalMessageInfo
+
+func (m *SourceBuildStrategy) Reset() { *m = SourceBuildStrategy{} }
+func (*SourceBuildStrategy) ProtoMessage() {}
+func (*SourceBuildStrategy) Descriptor() ([]byte, []int) {
+	return fileDescriptor_2ba579f6f004cb75, []int{48}
+}
+func (m *SourceBuildStrategy) XXX_Unmarshal(b []byte) error {
+	return m.Unmarshal(b)
+}
+func (m *SourceBuildStrategy) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	b = b[:cap(b)]
+	n, err := m.MarshalToSizedBuffer(b)
+	if err != nil {
+		return nil, err
+	}
+	return b[:n], nil
+}
+func (m *SourceBuildStrategy) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_SourceBuildStrategy.Merge(m, src)
+}
+func (m *SourceBuildStrategy) XXX_Size() int {
+	return m.Size()
+}
+func (m *SourceBuildStrategy) XXX_DiscardUnknown() {
+	xxx_messageInfo_SourceBuildStrategy.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_SourceBuildStrategy proto.InternalMessageInfo
+
+func (m *SourceControlUser) Reset() { *m = SourceControlUser{} }
+func (*SourceControlUser) ProtoMessage() {}
+func (*SourceControlUser) Descriptor() ([]byte, []int) {
+	return fileDescriptor_2ba579f6f004cb75, []int{49}
+}
+func (m *SourceControlUser) XXX_Unmarshal(b []byte) error {
+	return m.Unmarshal(b)
+}
+func (m *SourceControlUser) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	b = b[:cap(b)]
+	n, err := m.MarshalToSizedBuffer(b)
+	if err != nil {
+		return nil, err
+	}
+	return b[:n], nil
+}
+func (m *SourceControlUser) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_SourceControlUser.Merge(m, src)
+}
+func (m *SourceControlUser) XXX_Size() int {
+	return m.Size()
+}
+func (m *SourceControlUser) XXX_DiscardUnknown() {
+	xxx_messageInfo_SourceControlUser.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_SourceControlUser proto.InternalMessageInfo
+
+func (m *SourceRevision) Reset() { *m = SourceRevision{} }
+func (*SourceRevision) ProtoMessage() {}
+func (*SourceRevision) Descriptor() ([]byte, []int) {
+	return fileDescriptor_2ba579f6f004cb75, []int{50}
+}
+func (m *SourceRevision) XXX_Unmarshal(b []byte) error {
+	return m.Unmarshal(b)
+}
+func (m *SourceRevision) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	b = b[:cap(b)]
+	n, err := m.MarshalToSizedBuffer(b)
+	if err != nil {
+		return nil, err
+	}
+	return b[:n], nil
+}
+func (m *SourceRevision) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_SourceRevision.Merge(m, src)
+}
+func (m *SourceRevision) XXX_Size() int {
+	return m.Size()
+}
+func (m *SourceRevision) XXX_DiscardUnknown() {
+	xxx_messageInfo_SourceRevision.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_SourceRevision proto.InternalMessageInfo
+
+func (m *SourceStrategyOptions) Reset() { *m = SourceStrategyOptions{} }
+func (*SourceStrategyOptions) ProtoMessage() {}
+func (*SourceStrategyOptions) Descriptor() ([]byte, []int) {
+	return fileDescriptor_2ba579f6f004cb75, []int{51}
+}
+func (m *SourceStrategyOptions) XXX_Unmarshal(b []byte) error {
+	return m.Unmarshal(b)
+}
+func (m *SourceStrategyOptions) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	b = b[:cap(b)]
+	n, err := m.MarshalToSizedBuffer(b)
+	if err != nil {
+		return nil, err
+	}
+	return b[:n], nil
+}
+func (m *SourceStrategyOptions) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_SourceStrategyOptions.Merge(m, src)
+}
+func (m *SourceStrategyOptions) XXX_Size() int {
+	return m.Size()
+}
+func (m *SourceStrategyOptions) XXX_DiscardUnknown() {
+	xxx_messageInfo_SourceStrategyOptions.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_SourceStrategyOptions proto.InternalMessageInfo
+
+func (m *StageInfo) Reset() { *m = StageInfo{} }
+func (*StageInfo) ProtoMessage() {}
+func (*StageInfo) Descriptor() ([]byte, []int) {
+	return fileDescriptor_2ba579f6f004cb75, []int{52}
+}
+func (m *StageInfo) XXX_Unmarshal(b []byte) error {
+	return m.Unmarshal(b)
+}
+func (m *StageInfo) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	b = b[:cap(b)]
+	n, err := m.MarshalToSizedBuffer(b)
+	if err != nil {
+		return nil, err
+	}
+	return b[:n], nil
+}
+func (m *StageInfo) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_StageInfo.Merge(m, src)
+}
+func (m *StageInfo) XXX_Size() int {
+	return m.Size()
+}
+func (m *StageInfo) XXX_DiscardUnknown() {
+	xxx_messageInfo_StageInfo.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_StageInfo proto.InternalMessageInfo
+
+func (m *StepInfo) Reset() { *m = StepInfo{} }
+func (*StepInfo) ProtoMessage() {}
+func (*StepInfo) Descriptor() ([]byte, []int) {
+	return fileDescriptor_2ba579f6f004cb75, []int{53}
+}
+func (m *StepInfo) XXX_Unmarshal(b []byte) error {
+	return m.Unmarshal(b)
+}
+func (m *StepInfo) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	b = b[:cap(b)]
+	n, err := m.MarshalToSizedBuffer(b)
+	if err != nil {
+		return nil, err
+	}
+	return b[:n], nil
+}
+func (m *StepInfo) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_StepInfo.Merge(m, src)
+}
+func (m *StepInfo) XXX_Size() int {
+	return m.Size()
+}
+func (m *StepInfo) XXX_DiscardUnknown() {
+	xxx_messageInfo_StepInfo.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_StepInfo proto.InternalMessageInfo
+
+func (m *WebHookTrigger) Reset() { *m = WebHookTrigger{} }
+func (*WebHookTrigger) ProtoMessage() {}
+func (*WebHookTrigger) Descriptor() ([]byte, []int) {
+	return fileDescriptor_2ba579f6f004cb75, []int{54}
+}
+func (m *WebHookTrigger) XXX_Unmarshal(b []byte) error {
+	return m.Unmarshal(b)
+}
+func (m *WebHookTrigger) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	b = b[:cap(b)]
+	n, err := m.MarshalToSizedBuffer(b)
+	if err != nil {
+		return nil, err
+	}
+	return b[:n], nil
+}
+func (m *WebHookTrigger) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_WebHookTrigger.Merge(m, src)
+}
+func (m *WebHookTrigger) XXX_Size() int {
+	return m.Size()
+}
+func (m *WebHookTrigger) XXX_DiscardUnknown() {
+	xxx_messageInfo_WebHookTrigger.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_WebHookTrigger proto.InternalMessageInfo
+
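+// init registers each message with the proto type registry under its fully
+// qualified name (github.com.openshift.api.build.v1.*); OptionalNodeSelector
+// additionally registers the map type backing its Items field.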
+func init() {
+	proto.RegisterType((*BinaryBuildRequestOptions)(nil), "github.com.openshift.api.build.v1.BinaryBuildRequestOptions")
+	proto.RegisterType((*BinaryBuildSource)(nil), "github.com.openshift.api.build.v1.BinaryBuildSource")
+	proto.RegisterType((*BitbucketWebHookCause)(nil), "github.com.openshift.api.build.v1.BitbucketWebHookCause")
+	proto.RegisterType((*Build)(nil), "github.com.openshift.api.build.v1.Build")
+	proto.RegisterType((*BuildCondition)(nil), "github.com.openshift.api.build.v1.BuildCondition")
+	proto.RegisterType((*BuildConfig)(nil), "github.com.openshift.api.build.v1.BuildConfig")
+	proto.RegisterType((*BuildConfigList)(nil), "github.com.openshift.api.build.v1.BuildConfigList")
+	proto.RegisterType((*BuildConfigSpec)(nil), "github.com.openshift.api.build.v1.BuildConfigSpec")
+	proto.RegisterType((*BuildConfigStatus)(nil), "github.com.openshift.api.build.v1.BuildConfigStatus")
+	proto.RegisterType((*BuildList)(nil), "github.com.openshift.api.build.v1.BuildList")
+	proto.RegisterType((*BuildLog)(nil), "github.com.openshift.api.build.v1.BuildLog")
+	proto.RegisterType((*BuildLogOptions)(nil), "github.com.openshift.api.build.v1.BuildLogOptions")
+	proto.RegisterType((*BuildOutput)(nil), "github.com.openshift.api.build.v1.BuildOutput")
+	proto.RegisterType((*BuildPostCommitSpec)(nil), "github.com.openshift.api.build.v1.BuildPostCommitSpec")
+	proto.RegisterType((*BuildRequest)(nil), "github.com.openshift.api.build.v1.BuildRequest")
+	proto.RegisterType((*BuildSource)(nil), "github.com.openshift.api.build.v1.BuildSource")
+	proto.RegisterType((*BuildSpec)(nil), "github.com.openshift.api.build.v1.BuildSpec")
+	proto.RegisterType((*BuildStatus)(nil), "github.com.openshift.api.build.v1.BuildStatus")
+	proto.RegisterType((*BuildStatusOutput)(nil), "github.com.openshift.api.build.v1.BuildStatusOutput")
+	proto.RegisterType((*BuildStatusOutputTo)(nil), "github.com.openshift.api.build.v1.BuildStatusOutputTo")
+	proto.RegisterType((*BuildStrategy)(nil), "github.com.openshift.api.build.v1.BuildStrategy")
+	proto.RegisterType((*BuildTriggerCause)(nil), "github.com.openshift.api.build.v1.BuildTriggerCause")
+	proto.RegisterType((*BuildTriggerPolicy)(nil), "github.com.openshift.api.build.v1.BuildTriggerPolicy")
+	proto.RegisterType((*CommonSpec)(nil), "github.com.openshift.api.build.v1.CommonSpec")
+	proto.RegisterType((*CommonWebHookCause)(nil), "github.com.openshift.api.build.v1.CommonWebHookCause")
+	proto.RegisterType((*ConfigMapBuildSource)(nil), "github.com.openshift.api.build.v1.ConfigMapBuildSource")
+	proto.RegisterType((*CustomBuildStrategy)(nil), "github.com.openshift.api.build.v1.CustomBuildStrategy")
+	proto.RegisterType((*DockerBuildStrategy)(nil), "github.com.openshift.api.build.v1.DockerBuildStrategy")
+	proto.RegisterType((*DockerStrategyOptions)(nil), "github.com.openshift.api.build.v1.DockerStrategyOptions")
+	proto.RegisterType((*GenericWebHookCause)(nil), "github.com.openshift.api.build.v1.GenericWebHookCause")
+	proto.RegisterType((*GenericWebHookEvent)(nil), "github.com.openshift.api.build.v1.GenericWebHookEvent")
+	proto.RegisterType((*GitBuildSource)(nil), "github.com.openshift.api.build.v1.GitBuildSource")
+	proto.RegisterType((*GitHubWebHookCause)(nil), "github.com.openshift.api.build.v1.GitHubWebHookCause")
+	proto.RegisterType((*GitInfo)(nil), "github.com.openshift.api.build.v1.GitInfo")
+	proto.RegisterType((*GitLabWebHookCause)(nil), "github.com.openshift.api.build.v1.GitLabWebHookCause")
+	proto.RegisterType((*GitRefInfo)(nil), "github.com.openshift.api.build.v1.GitRefInfo")
+	proto.RegisterType((*GitSourceRevision)(nil), "github.com.openshift.api.build.v1.GitSourceRevision")
+	proto.RegisterType((*ImageChangeCause)(nil), "github.com.openshift.api.build.v1.ImageChangeCause")
+	proto.RegisterType((*ImageChangeTrigger)(nil), "github.com.openshift.api.build.v1.ImageChangeTrigger")
+	proto.RegisterType((*ImageLabel)(nil), "github.com.openshift.api.build.v1.ImageLabel")
+	proto.RegisterType((*ImageSource)(nil), "github.com.openshift.api.build.v1.ImageSource")
+	proto.RegisterType((*ImageSourcePath)(nil), "github.com.openshift.api.build.v1.ImageSourcePath")
+	proto.RegisterType((*JenkinsPipelineBuildStrategy)(nil), "github.com.openshift.api.build.v1.JenkinsPipelineBuildStrategy")
+	proto.RegisterType((*OptionalNodeSelector)(nil), "github.com.openshift.api.build.v1.OptionalNodeSelector")
+	proto.RegisterMapType((map[string]string)(nil), "github.com.openshift.api.build.v1.OptionalNodeSelector.ItemsEntry")
+	proto.RegisterType((*ProxyConfig)(nil), "github.com.openshift.api.build.v1.ProxyConfig")
+	proto.RegisterType((*SecretBuildSource)(nil), "github.com.openshift.api.build.v1.SecretBuildSource")
+	proto.RegisterType((*SecretLocalReference)(nil), "github.com.openshift.api.build.v1.SecretLocalReference")
+	proto.RegisterType((*SecretSpec)(nil), "github.com.openshift.api.build.v1.SecretSpec")
+	proto.RegisterType((*SourceBuildStrategy)(nil), "github.com.openshift.api.build.v1.SourceBuildStrategy")
+	proto.RegisterType((*SourceControlUser)(nil), "github.com.openshift.api.build.v1.SourceControlUser")
+	proto.RegisterType((*SourceRevision)(nil), "github.com.openshift.api.build.v1.SourceRevision")
+	proto.RegisterType((*SourceStrategyOptions)(nil), "github.com.openshift.api.build.v1.SourceStrategyOptions")
+	proto.RegisterType((*StageInfo)(nil), "github.com.openshift.api.build.v1.StageInfo")
+	proto.RegisterType((*StepInfo)(nil), "github.com.openshift.api.build.v1.StepInfo")
+	proto.RegisterType((*WebHookTrigger)(nil), "github.com.openshift.api.build.v1.WebHookTrigger")
+}
+
+func init() {
+	proto.RegisterFile("github.com/openshift/api/build/v1/generated.proto", fileDescriptor_2ba579f6f004cb75)
+}
+
+var fileDescriptor_2ba579f6f004cb75 = []byte{
+	// 4044 bytes of a gzipped FileDescriptorProto
+	0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe4, 0x5b, 0xcd, 0x6f, 0x1c, 0xc9,
	... (remaining rows of raw gzipped descriptor bytes omitted) ...
+	0xff, 0xff, 0xf9, 0x86, 0x27, 0x85, 0x8a, 0x40, 0x00, 0x00,
+}
+
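+// The generated marshalers come in threes: Marshal allocates a buffer of
+// exactly Size() bytes, MarshalTo wraps MarshalToSizedBuffer, and
+// MarshalToSizedBuffer fills dAtA back to front, writing each field's value
+// and then its key byte ((field_number << 3) | wire_type; e.g. 0x42 marks
+// string field 8) in reverse field order.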
+func (m *BinaryBuildRequestOptions) Marshal() (dAtA []byte, err error) {
+	size := m.Size()
+	dAtA = make([]byte, size)
+	n, err := m.MarshalToSizedBuffer(dAtA[:size])
+	if err != nil {
+		return nil, err
+	}
+	return dAtA[:n], nil
+}
+
+func (m *BinaryBuildRequestOptions) MarshalTo(dAtA []byte) (int, error) {
+	size := m.Size()
+	return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *BinaryBuildRequestOptions) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+	i := len(dAtA)
+	_ = i
+	var l int
+	_ = l
+	i -= len(m.CommitterEmail)
+	copy(dAtA[i:], m.CommitterEmail)
+	i = encodeVarintGenerated(dAtA, i, uint64(len(m.CommitterEmail)))
+	i--
+	dAtA[i] = 0x42
+	i -= len(m.CommitterName)
+	copy(dAtA[i:], m.CommitterName)
+	i = encodeVarintGenerated(dAtA, i, uint64(len(m.CommitterName)))
+	i--
+	dAtA[i] = 0x3a
+	i -= len(m.AuthorEmail)
+	copy(dAtA[i:], m.AuthorEmail)
+	i = encodeVarintGenerated(dAtA, i, uint64(len(m.AuthorEmail)))
+	i--
+	dAtA[i] = 0x32
+	i -= len(m.AuthorName)
+	copy(dAtA[i:], m.AuthorName)
+	i = encodeVarintGenerated(dAtA, i, uint64(len(m.AuthorName)))
+	i--
+	dAtA[i] = 0x2a
+	i -= len(m.Message)
+	copy(dAtA[i:], m.Message)
+	i = encodeVarintGenerated(dAtA, i, uint64(len(m.Message)))
+	i--
+	dAtA[i] = 0x22
+	i -= len(m.Commit)
+	copy(dAtA[i:], m.Commit)
+	i = encodeVarintGenerated(dAtA, i, uint64(len(m.Commit)))
+	i--
+	dAtA[i] = 0x1a
+	i -= len(m.AsFile)
+	copy(dAtA[i:], m.AsFile)
+	i = encodeVarintGenerated(dAtA, i, uint64(len(m.AsFile)))
+	i--
+	dAtA[i] = 0x12
+	{
+		size, err := m.ObjectMeta.MarshalToSizedBuffer(dAtA[:i])
+		if err != nil {
+			return 0, err
+		}
+		i -= size
+		i = encodeVarintGenerated(dAtA, i, uint64(size))
+	}
+	i--
+	dAtA[i] = 0xa
+	return len(dAtA) - i, nil
+}
+
+func (m *BinaryBuildSource) Marshal() (dAtA []byte, err error) {
+	size := m.Size()
+	dAtA = make([]byte, size)
+	n, err := m.MarshalToSizedBuffer(dAtA[:size])
+	if err != nil {
+		return nil, err
+	}
+	return dAtA[:n], nil
+}
+
+func (m *BinaryBuildSource) MarshalTo(dAtA []byte) (int, error) {
+	size := m.Size()
+	return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *BinaryBuildSource) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+	i := len(dAtA)
+	_ = i
+	var l int
+	_ = l
+	i -= len(m.AsFile)
+	copy(dAtA[i:], m.AsFile)
+	i = encodeVarintGenerated(dAtA, i, uint64(len(m.AsFile)))
+	i--
+	dAtA[i] = 0xa
+	return len(dAtA) - i, nil
+}
+
+func (m *BitbucketWebHookCause) Marshal() (dAtA []byte, err error) {
+	size := m.Size()
+	dAtA = make([]byte, size)
+	n, err := m.MarshalToSizedBuffer(dAtA[:size])
+	if err != nil {
+		return nil, err
+	}
+	return dAtA[:n], nil
+}
+
+func (m *BitbucketWebHookCause) MarshalTo(dAtA []byte) (int, error) {
+	size := m.Size()
+	return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *BitbucketWebHookCause) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+	i := len(dAtA)
+	_ = i
+	var l int
+	_ = l
+	{
+		size, err := m.CommonWebHookCause.MarshalToSizedBuffer(dAtA[:i])
+		if err != nil {
+			return 0, err
+		}
+		i -= size
+		i = encodeVarintGenerated(dAtA, i, uint64(size))
+	}
+	i--
+	dAtA[i] = 0xa
+	return len(dAtA) - i, nil
+}
+
+func (m *Build) Marshal() (dAtA []byte, err error) {
+	size := m.Size()
+	dAtA = make([]byte, size)
+	n, err := m.MarshalToSizedBuffer(dAtA[:size])
+	if err != nil {
+		return nil, err
+	}
+	return dAtA[:n], nil
+}
+
+func (m *Build) MarshalTo(dAtA []byte) (int, error) {
+	size := m.Size()
+	return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *Build) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+	i := len(dAtA)
+	_ = i
+	var l int
+	_ = l
+	{
+		size, err := m.Status.MarshalToSizedBuffer(dAtA[:i])
+		if err != nil {
+			return 0, err
+		}
+		i -= size
+		i = encodeVarintGenerated(dAtA, i, uint64(size))
+	}
+	i--
+	dAtA[i] = 0x1a
+	{
+		size, err := m.Spec.MarshalToSizedBuffer(dAtA[:i])
+		if err != nil {
+			return 0, err
+		}
+		i -= size
+		i = encodeVarintGenerated(dAtA, i, uint64(size))
+	}
+	i--
+	dAtA[i] = 0x12
+	{
+		size, err := m.ObjectMeta.MarshalToSizedBuffer(dAtA[:i])
+		if err != nil {
+			return 0, err
+		}
+		i -= size
+		i = encodeVarintGenerated(dAtA, i, uint64(size))
+	}
+	i--
+	dAtA[i] = 0xa
+	return len(dAtA) - i, nil
+}
+
+func (m *BuildCondition) Marshal() (dAtA []byte, err error) {
+	size := m.Size()
+	dAtA = make([]byte, size)
+	n, err := m.MarshalToSizedBuffer(dAtA[:size])
+	if err != nil {
+		return nil, err
+	}
+	return dAtA[:n], nil
+}
+
+func (m *BuildCondition) MarshalTo(dAtA []byte) (int, error) {
+	size := m.Size()
+	return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *BuildCondition) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+	i := len(dAtA)
+	_ = i
+	var l int
+	_ = l
+	{
+		size, err := m.LastUpdateTime.MarshalToSizedBuffer(dAtA[:i])
+		if err != nil {
+			return 0, err
+		}
+		i -= size
+		i = encodeVarintGenerated(dAtA, i, uint64(size))
+	}
+	i--
+	dAtA[i] = 0x32
+	i -= len(m.Message)
+	copy(dAtA[i:], m.Message)
+	i = encodeVarintGenerated(dAtA, i, uint64(len(m.Message)))
+	i--
+	dAtA[i] = 0x2a
+	i -= len(m.Reason)
+	copy(dAtA[i:], m.Reason)
+	i = encodeVarintGenerated(dAtA, i, uint64(len(m.Reason)))
+	i--
+	dAtA[i] = 0x22
+	{
+		size, err := m.LastTransitionTime.MarshalToSizedBuffer(dAtA[:i])
+		if err != nil {
+			return 0, err
+		}
+		i -= size
+		i = encodeVarintGenerated(dAtA, i, uint64(size))
+	}
+	i--
+	dAtA[i] = 0x1a
+	i -= len(m.Status)
+	copy(dAtA[i:], m.Status)
+	i = encodeVarintGenerated(dAtA, i, uint64(len(m.Status)))
+	i--
+	dAtA[i] = 0x12
+	i -= len(m.Type)
+	copy(dAtA[i:], m.Type)
+	i = encodeVarintGenerated(dAtA, i, uint64(len(m.Type)))
+	i--
+	dAtA[i] = 0xa
+	return len(dAtA) - i, nil
+}
+
+func (m *BuildConfig) Marshal() (dAtA []byte, err error) {
+	size := m.Size()
+	dAtA = make([]byte, size)
+	n, err := m.MarshalToSizedBuffer(dAtA[:size])
+	if err != nil {
+		return nil, err
+	}
+	return dAtA[:n], nil
+}
+
+func (m *BuildConfig) MarshalTo(dAtA []byte) (int, error) {
+	size := m.Size()
+	return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *BuildConfig) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+	i := len(dAtA)
+	_ = i
+	var l int
+	_ = l
+	{
+		size, err := m.Status.MarshalToSizedBuffer(dAtA[:i])
+		if err != nil {
+			return 0, err
+		}
+		i -= size
+		i = encodeVarintGenerated(dAtA, i, uint64(size))
+	}
+	i--
+	dAtA[i] = 0x1a
+	{
+		size, err := m.Spec.MarshalToSizedBuffer(dAtA[:i])
+		if err != nil {
+			return 0, err
+		}
+		i -= size
+		i = encodeVarintGenerated(dAtA, i, uint64(size))
+	}
+	i--
+	dAtA[i] = 0x12
+	{
+		size, err := m.ObjectMeta.MarshalToSizedBuffer(dAtA[:i])
+		if err != nil {
+			return 0, err
+		}
+		i -= size
+		i = encodeVarintGenerated(dAtA, i, uint64(size))
+	}
+	i--
+	dAtA[i] = 0xa
+	return len(dAtA) - i, nil
+}
+
+func (m *BuildConfigList) Marshal() (dAtA []byte, err error) {
+	size := m.Size()
+	dAtA = make([]byte, size)
+	n, err := m.MarshalToSizedBuffer(dAtA[:size])
+	if err != nil {
+		return nil, err
+	}
+	return dAtA[:n], nil
+}
+
+func (m *BuildConfigList) MarshalTo(dAtA []byte) (int, error) {
+	size := m.Size()
+	return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *BuildConfigList) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+	i := len(dAtA)
+	_ = i
+	var l int
+	_ = l
+	if len(m.Items) > 0 {
+		for iNdEx := len(m.Items) - 1; iNdEx >= 0; iNdEx-- {
+			{
+				size, err := m.Items[iNdEx].MarshalToSizedBuffer(dAtA[:i])
+				if err != nil {
+					return 0, err
+				}
+				i -= size
+				i = encodeVarintGenerated(dAtA, i, uint64(size))
+			}
+			i--
+			dAtA[i] = 0x12
+		}
+	}
+	{
+		size, err := m.ListMeta.MarshalToSizedBuffer(dAtA[:i])
+		if err != nil {
+			return 0, err
+		}
+		i -= size
+		i = encodeVarintGenerated(dAtA, i, uint64(size))
+	}
+	i--
+	dAtA[i] = 0xa
+	return len(dAtA) - i, nil
+}
+
+func (m *BuildConfigSpec) Marshal() (dAtA []byte, err error) {
+	size := m.Size()
+	dAtA = make([]byte, size)
+	n, err := m.MarshalToSizedBuffer(dAtA[:size])
+	if err != nil {
+		return nil, err
+	}
+	return dAtA[:n], nil
+}
+
+func (m *BuildConfigSpec) MarshalTo(dAtA []byte) (int, error) {
+	size := m.Size()
+	return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *BuildConfigSpec) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+	i := len(dAtA)
+	_ = i
+	var l int
+	_ = l
+	if m.FailedBuildsHistoryLimit != nil {
+		i = encodeVarintGenerated(dAtA, i, uint64(*m.FailedBuildsHistoryLimit))
+		i--
+		dAtA[i] = 0x28
+	}
+	if m.SuccessfulBuildsHistoryLimit != nil {
+		i = encodeVarintGenerated(dAtA, i, uint64(*m.SuccessfulBuildsHistoryLimit))
+		i--
+		dAtA[i] = 0x20
+	}
+	{
+		size, err := m.CommonSpec.MarshalToSizedBuffer(dAtA[:i])
+		if err != nil {
+			return 0, err
+		}
+		i -= size
+		i = encodeVarintGenerated(dAtA, i, uint64(size))
+	}
+	i--
+	dAtA[i] = 0x1a
+	i -= len(m.RunPolicy)
+	copy(dAtA[i:], m.RunPolicy)
+	i = encodeVarintGenerated(dAtA, i, uint64(len(m.RunPolicy)))
+	i--
+	dAtA[i] = 0x12
+	if len(m.Triggers) > 0 {
+		for iNdEx := len(m.Triggers) - 1; iNdEx >= 0; iNdEx-- {
+			{
+				size, err := m.Triggers[iNdEx].MarshalToSizedBuffer(dAtA[:i])
+				if err != nil {
+					return 0, err
+				}
+				i -= size
+				i = encodeVarintGenerated(dAtA, i, uint64(size))
+			}
+			i--
+			dAtA[i] = 0xa
+		}
+	}
+	return len(dAtA) - i, nil
+}
+
+func (m *BuildConfigStatus) Marshal() (dAtA []byte, err error) {
+	size := m.Size()
+	dAtA = make([]byte, size)
+	n, err := m.MarshalToSizedBuffer(dAtA[:size])
+	if err != nil {
+		return nil, err
+	}
+	return dAtA[:n], nil
+}
+
+func (m *BuildConfigStatus) MarshalTo(dAtA []byte) (int, error) {
+	size := m.Size()
+	return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *BuildConfigStatus) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+	i := len(dAtA)
+	_ = i
+	var l int
+	_ = l
+	i = encodeVarintGenerated(dAtA, i, uint64(m.LastVersion))
+	i--
+	dAtA[i] = 0x8
+	return len(dAtA) - i, nil
+}
+
+func (m *BuildList) Marshal() (dAtA []byte, err error) {
+	size := m.Size()
+	dAtA = make([]byte, size)
+	n, err := m.MarshalToSizedBuffer(dAtA[:size])
+	if err != nil {
+		return nil, err
+	}
+	return dAtA[:n], nil
+}
+
+func (m *BuildList) MarshalTo(dAtA []byte) (int, error) {
+	size := m.Size()
+	return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *BuildList) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+	i := len(dAtA)
+	_ = i
+	var l int
+	_ = l
+	if len(m.Items) > 0 {
+		for iNdEx := len(m.Items) - 1; iNdEx >= 0; iNdEx-- {
+			{
+				size, err := m.Items[iNdEx].MarshalToSizedBuffer(dAtA[:i])
+				if err != nil {
+					return 0, err
+				}
+				i -= size
+				i = encodeVarintGenerated(dAtA, i, uint64(size))
+			}
+			i--
+			dAtA[i] = 0x12
+		}
+	}
+	{
+		size, err := m.ListMeta.MarshalToSizedBuffer(dAtA[:i])
+		if err != nil {
+			return 0, err
+		}
+		i -= size
+		i = encodeVarintGenerated(dAtA, i, uint64(size))
+	}
+	i--
+	dAtA[i] = 0xa
+	return len(dAtA) - i, nil
+}
+
+func (m *BuildLog) Marshal() (dAtA []byte, err error) {
+	size := m.Size()
+	dAtA = make([]byte, size)
+	n, err := m.MarshalToSizedBuffer(dAtA[:size])
+	if err != nil {
+		return nil, err
+	}
+	return dAtA[:n], nil
+}
+
+func (m *BuildLog) MarshalTo(dAtA []byte) (int, error) {
+	size := m.Size()
+	return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *BuildLog) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+	i := len(dAtA)
+	_ = i
+	var l int
+	_ = l
+	return len(dAtA) - i, nil
+}
+
+func (m *BuildLogOptions) Marshal() (dAtA []byte, err error) {
+	size := m.Size()
+	dAtA = make([]byte, size)
+	n, err := m.MarshalToSizedBuffer(dAtA[:size])
+	if err != nil {
+		return nil, err
+	}
+	return dAtA[:n], nil
+}
+
+func (m *BuildLogOptions) MarshalTo(dAtA []byte) (int, error) {
+	size := m.Size()
+	return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *BuildLogOptions) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+	i := len(dAtA)
+	_ = i
+	var l int
+	_ = l
+	if m.Version != nil {
+		i = encodeVarintGenerated(dAtA, i, uint64(*m.Version))
+		i--
+		dAtA[i] = 0x50
+	}
+	i--
+	if m.NoWait {
+		dAtA[i] = 1
+	} else {
+		dAtA[i] = 0
+	}
+	i--
+	dAtA[i] = 0x48
+	if m.LimitBytes != nil {
+		i = encodeVarintGenerated(dAtA, i, uint64(*m.LimitBytes))
+		i--
+		dAtA[i] = 0x40
+	}
+	if m.TailLines != nil {
+		i = encodeVarintGenerated(dAtA, i, uint64(*m.TailLines))
+		i--
+		dAtA[i] = 0x38
+	}
+	i--
+	if m.Timestamps {
+		dAtA[i] = 1
+	} else {
+		dAtA[i] = 0
+	}
+	i--
+	dAtA[i] = 0x30
+	if m.SinceTime != nil {
+		{
+			size, err := m.SinceTime.MarshalToSizedBuffer(dAtA[:i])
+			if err != nil {
+				return 0, err
+			}
+			i -= size
+			i = encodeVarintGenerated(dAtA, i, uint64(size))
+		}
+		i--
+		dAtA[i] = 0x2a
+	}
+	if m.SinceSeconds != nil {
+		i = encodeVarintGenerated(dAtA, i, uint64(*m.SinceSeconds))
+		i--
+		dAtA[i] = 0x20
+	}
+	i--
+	if m.Previous {
+		dAtA[i] = 1
+	} else {
+		dAtA[i] = 0
+	}
+	i--
+	dAtA[i] = 0x18
+	i--
+	if m.Follow {
+		dAtA[i] = 1
+	} else {
+		dAtA[i] = 0
+	}
+	i--
+	dAtA[i] = 0x10
+	i -= len(m.Container)
+	copy(dAtA[i:], m.Container)
+	i = encodeVarintGenerated(dAtA, i, uint64(len(m.Container)))
+	i--
+	dAtA[i] = 0xa
+	return len(dAtA) - i, nil
+}
+
+func (m *BuildOutput) Marshal() (dAtA []byte, err error) {
+	size := m.Size()
+	dAtA = make([]byte, size)
+	n, err := m.MarshalToSizedBuffer(dAtA[:size])
+	if err != nil {
+		return nil, err
+	}
+	return dAtA[:n], nil
+}
+
+func (m *BuildOutput) MarshalTo(dAtA []byte) (int, error) {
+	size := m.Size()
+	return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *BuildOutput) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+	i := len(dAtA)
+	_ = i
+	var l int
+	_ = l
+	if len(m.ImageLabels) > 0 {
+		for iNdEx := len(m.ImageLabels) - 1; iNdEx >= 0; iNdEx-- {
+			{
+				size, err := m.ImageLabels[iNdEx].MarshalToSizedBuffer(dAtA[:i])
+				if err != nil {
+					return 0, err
+				}
+				i -= size
+				i = encodeVarintGenerated(dAtA, i, uint64(size))
+			}
+			i--
+			dAtA[i] = 0x1a
+		}
+	}
+	if m.PushSecret != nil {
+		{
+			size, err := m.PushSecret.MarshalToSizedBuffer(dAtA[:i])
+			if err != nil {
+				return 0, err
+			}
+			i -= size
+			i = encodeVarintGenerated(dAtA, i, uint64(size))
+		}
+		i--
+		dAtA[i] = 0x12
+	}
+	if m.To != nil {
+		{
+			size, err := m.To.MarshalToSizedBuffer(dAtA[:i])
+			if err != nil {
+				return 0, err
+			}
+			i -= size
+			i = encodeVarintGenerated(dAtA, i, uint64(size))
+		}
+		i--
+		dAtA[i] = 0xa
+	}
+	return len(dAtA) - i, nil
+}
+
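+// Repeated fields iterate from the last element down (iNdEx--): because the
+// buffer is written back to front, this leaves the elements in their declared
+// order when the message is decoded front to back.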
+func (m *BuildPostCommitSpec) Marshal() (dAtA []byte, err error) {
+	size := m.Size()
+	dAtA = make([]byte, size)
+	n, err := m.MarshalToSizedBuffer(dAtA[:size])
+	if err != nil {
+		return nil, err
+	}
+	return dAtA[:n], nil
+}
+
+func (m *BuildPostCommitSpec) MarshalTo(dAtA []byte) (int, error) {
+	size := m.Size()
+	return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *BuildPostCommitSpec) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+	i := len(dAtA)
+	_ = i
+	var l int
+	_ = l
+	i -= len(m.Script)
+	copy(dAtA[i:], m.Script)
+	i = encodeVarintGenerated(dAtA, i, uint64(len(m.Script)))
+	i--
+	dAtA[i] = 0x1a
+	if len(m.Args) > 0 {
+		for iNdEx := len(m.Args) - 1; iNdEx >= 0; iNdEx-- {
+			i -= len(m.Args[iNdEx])
+			copy(dAtA[i:], m.Args[iNdEx])
+			i = encodeVarintGenerated(dAtA, i, uint64(len(m.Args[iNdEx])))
+			i--
+			dAtA[i] = 0x12
+		}
+	}
+	if len(m.Command) > 0 {
+		for iNdEx := len(m.Command) - 1; iNdEx >= 0; iNdEx-- {
+			i -= len(m.Command[iNdEx])
+			copy(dAtA[i:], m.Command[iNdEx])
+			i = encodeVarintGenerated(dAtA, i, uint64(len(m.Command[iNdEx])))
+			i--
+			dAtA[i] = 0xa
+		}
+	}
+	return len(dAtA) - i, nil
+}
+
+func (m *BuildRequest) Marshal() (dAtA []byte, err error) {
+	size := m.Size()
+	dAtA = make([]byte, size)
+	n, err := m.MarshalToSizedBuffer(dAtA[:size])
+	if err != nil {
+		return nil, err
+	}
+	return dAtA[:n], nil
+}
+
+func (m *BuildRequest) MarshalTo(dAtA []byte) (int, error) {
+	size := m.Size()
+	return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *BuildRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+	i := len(dAtA)
+	_ = i
+	var l int
+	_ = l
+	if m.SourceStrategyOptions != nil {
+		{
+			size, err := m.SourceStrategyOptions.MarshalToSizedBuffer(dAtA[:i])
+			if err != nil {
+				return 0, err
+			}
+			i -= size
+			i = encodeVarintGenerated(dAtA, i, uint64(size))
+		}
+		i--
+		dAtA[i] = 0x52
+	}
+	if m.DockerStrategyOptions != nil {
+		{
+			size, err := m.DockerStrategyOptions.MarshalToSizedBuffer(dAtA[:i])
+			if err != nil {
+				return 0, err
+			}
+			i -= size
+			i = encodeVarintGenerated(dAtA, i, uint64(size))
+		}
+		i--
+		dAtA[i] = 0x4a
+	}
+	if len(m.TriggeredBy) > 0 {
+		for iNdEx := len(m.TriggeredBy) - 1; iNdEx >= 0; iNdEx-- {
+			{
+				size, err := m.TriggeredBy[iNdEx].MarshalToSizedBuffer(dAtA[:i])
+				if err != nil {
+					return 0, err
+				}
+				i -= size
+				i = encodeVarintGenerated(dAtA, i, uint64(size))
+			}
+			i--
+			dAtA[i] = 0x42
+		}
+	}
+	if len(m.Env) > 0 {
+		for iNdEx := len(m.Env) - 1; iNdEx >= 0; iNdEx-- {
+			{
+				size, err := m.Env[iNdEx].MarshalToSizedBuffer(dAtA[:i])
+				if err != nil {
+					return 0, err
+				}
+				i -= size
+				i = encodeVarintGenerated(dAtA, i, uint64(size))
+			}
+			i--
+			dAtA[i] = 0x3a
+		}
+	}
+	if m.LastVersion != nil {
+		i = encodeVarintGenerated(dAtA, i, uint64(*m.LastVersion))
+		i--
+		dAtA[i] = 0x30
+	}
+	if m.Binary != nil {
+		{
+			size, err := m.Binary.MarshalToSizedBuffer(dAtA[:i])
+			if err != nil {
+				return 0, err
+			}
+			i -= size
+			i = encodeVarintGenerated(dAtA, i, uint64(size))
+		}
+		i--
+		dAtA[i] = 0x2a
+	}
+	if m.From != nil {
+		{
+			size, err := m.From.MarshalToSizedBuffer(dAtA[:i])
+			if err != nil {
+				return 0, err
+			}
+			i -= size
+			i = encodeVarintGenerated(dAtA, i, uint64(size))
+		}
+		i--
+		dAtA[i] = 0x22
+	}
+	if m.TriggeredByImage != nil {
+		{
+			size, err := m.TriggeredByImage.MarshalToSizedBuffer(dAtA[:i])
+			if err != nil {
+				return 0, err
+			}
+			i -= size
+			i = encodeVarintGenerated(dAtA, i, uint64(size))
+		}
+		i--
+		dAtA[i] = 0x1a
+	}
+	if m.Revision != nil {
+		{
+			size, err := m.Revision.MarshalToSizedBuffer(dAtA[:i])
+			if err != nil {
+				return 0, err
+			}
+			i -= size
+			i = encodeVarintGenerated(dAtA, i, uint64(size))
+		}
+		i--
+		dAtA[i] = 0x12
+	}
+	{
+		size, err := m.ObjectMeta.MarshalToSizedBuffer(dAtA[:i])
+		if err != nil {
+			return 0, err
+		}
+		i -= size
+		i = encodeVarintGenerated(dAtA, i, uint64(size))
+	}
+	i--
+	dAtA[i] = 0xa
+	return len(dAtA) - i, nil
+}
+
+func (m *BuildSource) Marshal() (dAtA []byte, err error) {
+	size := m.Size()
+	dAtA = make([]byte, size)
+	n, err := m.MarshalToSizedBuffer(dAtA[:size])
+	if err != nil {
+		return nil, err
+	}
+	return dAtA[:n], nil
+}
+
+func (m *BuildSource) MarshalTo(dAtA []byte) (int, error) {
+	size := m.Size()
+	return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *BuildSource) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+	i := len(dAtA)
+	_ = i
+	var l int
+	_ = l
+	if len(m.ConfigMaps) > 0 {
+		for iNdEx := len(m.ConfigMaps) - 1; iNdEx >= 0; iNdEx-- {
+			{
+				size, err := m.ConfigMaps[iNdEx].MarshalToSizedBuffer(dAtA[:i])
+				if err != nil {
+					return 0, err
+				}
+				i -= size
+				i = encodeVarintGenerated(dAtA, i, uint64(size))
+			}
+			i--
+			dAtA[i] = 0x4a
+		}
+	}
+	if len(m.Secrets) > 0 {
+		for iNdEx := len(m.Secrets) - 1; iNdEx >= 0; iNdEx-- {
+			{
+				size, err := m.Secrets[iNdEx].MarshalToSizedBuffer(dAtA[:i])
+				if err != nil {
+					return 0, err
+				}
+				i -= size
+				i = encodeVarintGenerated(dAtA, i, uint64(size))
+			}
+			i--
+			dAtA[i] = 0x42
+		}
+	}
+	if m.SourceSecret != nil {
+		{
+			size, err := m.SourceSecret.MarshalToSizedBuffer(dAtA[:i])
+			if err != nil {
+				return 0, err
+			}
+			i -= size
+			i = encodeVarintGenerated(dAtA, i, uint64(size))
+		}
+		i--
+		dAtA[i] = 0x3a
+	}
+	i -= len(m.ContextDir)
+	copy(dAtA[i:], m.ContextDir)
+	i = encodeVarintGenerated(dAtA, i, uint64(len(m.ContextDir)))
+	i--
+	dAtA[i] = 0x32
+	if len(m.Images) > 0 {
+		for iNdEx := len(m.Images) - 1; iNdEx >= 0; iNdEx-- {
+			{
+				size, err := m.Images[iNdEx].MarshalToSizedBuffer(dAtA[:i])
+				if err != nil {
+					return 0, err
+				}
+				i -= size
+				i = encodeVarintGenerated(dAtA, i, uint64(size))
+			}
+			i--
+			dAtA[i] = 0x2a
+		}
+	}
+	if m.Git != nil {
+		{
+			size, err := m.Git.MarshalToSizedBuffer(dAtA[:i])
+			if err != nil {
+				return 0, err
+			}
+			i -= size
+			i = encodeVarintGenerated(dAtA, i, uint64(size))
+		}
+		i--
+		dAtA[i] = 0x22
+	}
+	if m.Dockerfile != nil {
+		i -= len(*m.Dockerfile)
+		copy(dAtA[i:], *m.Dockerfile)
+		i = encodeVarintGenerated(dAtA, i, uint64(len(*m.Dockerfile)))
+		i--
+		dAtA[i] = 0x1a
+	}
+	if m.Binary != nil {
+		{
+			size, err := m.Binary.MarshalToSizedBuffer(dAtA[:i])
+			if err != nil {
+				return 0, err
+			}
+			i -= size
+			i = encodeVarintGenerated(dAtA, i, uint64(size))
+		}
+		i--
+		dAtA[i] = 0x12
+	}
+	i -= len(m.Type)
+	copy(dAtA[i:], m.Type)
+	i = encodeVarintGenerated(dAtA, i, uint64(len(m.Type)))
+	i--
+	dAtA[i] = 0xa
+	return len(dAtA) - i, nil
+}
+
+func (m *BuildSpec) Marshal() (dAtA []byte, err error) {
+	size := m.Size()
+	dAtA = make([]byte, size)
+	n, err := m.MarshalToSizedBuffer(dAtA[:size])
+	if err != nil {
+		return nil, err
+	}
+	return dAtA[:n], nil
+}
+
+func (m *BuildSpec) MarshalTo(dAtA []byte) (int, error) {
+	size := m.Size()
+	return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *BuildSpec) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+	i := len(dAtA)
+	_ = i
+	var l int
+	_ = l
+	if len(m.TriggeredBy) > 0 {
+		for iNdEx := len(m.TriggeredBy) - 1; iNdEx >= 0; iNdEx-- {
+			{
+				size, err := m.TriggeredBy[iNdEx].MarshalToSizedBuffer(dAtA[:i])
+				if err != nil {
+					return 0, err
+				}
+				i -= size
+				i = encodeVarintGenerated(dAtA, i, uint64(size))
+			}
+			i--
+			dAtA[i] = 0x12
+		}
+	}
+	{
+		size, err := m.CommonSpec.MarshalToSizedBuffer(dAtA[:i])
+		if err != nil {
+			return 0, err
+		}
+		i -= size
+		i = encodeVarintGenerated(dAtA, i, uint64(size))
+	}
+	i--
+	dAtA[i] = 0xa
+	return len(dAtA) - i, nil
+}
+
+func (m *BuildStatus) Marshal() (dAtA []byte, err error) {
+	size := m.Size()
+	dAtA = make([]byte, size)
+	n, err := m.MarshalToSizedBuffer(dAtA[:size])
+	if err != nil {
+		return nil, err
+	}
+	return dAtA[:n], nil
+}
+
m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *BuildStatus) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.Conditions) > 0 { + for iNdEx := len(m.Conditions) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Conditions[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x6a + } + } + i -= len(m.LogSnippet) + copy(dAtA[i:], m.LogSnippet) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.LogSnippet))) + i-- + dAtA[i] = 0x62 + if len(m.Stages) > 0 { + for iNdEx := len(m.Stages) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Stages[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x5a + } + } + { + size, err := m.Output.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x52 + if m.Config != nil { + { + size, err := m.Config.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x4a + } + i -= len(m.OutputDockerImageReference) + copy(dAtA[i:], m.OutputDockerImageReference) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.OutputDockerImageReference))) + i-- + dAtA[i] = 0x42 + i = encodeVarintGenerated(dAtA, i, uint64(m.Duration)) + i-- + dAtA[i] = 0x38 + if m.CompletionTimestamp != nil { + { + size, err := m.CompletionTimestamp.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x32 + } + if m.StartTimestamp != nil { + { + size, err := m.StartTimestamp.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x2a + } + i -= len(m.Message) + copy(dAtA[i:], m.Message) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Message))) + i-- + dAtA[i] = 0x22 + i -= len(m.Reason) + copy(dAtA[i:], m.Reason) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Reason))) + i-- + dAtA[i] = 0x1a + i-- + if m.Cancelled { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x10 + i -= len(m.Phase) + copy(dAtA[i:], m.Phase) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Phase))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *BuildStatusOutput) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *BuildStatusOutput) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *BuildStatusOutput) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.To != nil { + { + size, err := m.To.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *BuildStatusOutputTo) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *BuildStatusOutputTo) MarshalTo(dAtA []byte) (int, 
error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *BuildStatusOutputTo) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + i -= len(m.ImageDigest) + copy(dAtA[i:], m.ImageDigest) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.ImageDigest))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *BuildStrategy) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *BuildStrategy) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *BuildStrategy) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.JenkinsPipelineStrategy != nil { + { + size, err := m.JenkinsPipelineStrategy.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x2a + } + if m.CustomStrategy != nil { + { + size, err := m.CustomStrategy.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x22 + } + if m.SourceStrategy != nil { + { + size, err := m.SourceStrategy.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a + } + if m.DockerStrategy != nil { + { + size, err := m.DockerStrategy.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + i -= len(m.Type) + copy(dAtA[i:], m.Type) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Type))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *BuildTriggerCause) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *BuildTriggerCause) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *BuildTriggerCause) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.BitbucketWebHook != nil { + { + size, err := m.BitbucketWebHook.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x32 + } + if m.GitLabWebHook != nil { + { + size, err := m.GitLabWebHook.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x2a + } + if m.ImageChangeBuild != nil { + { + size, err := m.ImageChangeBuild.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x22 + } + if m.GitHubWebHook != nil { + { + size, err := m.GitHubWebHook.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a + } + if m.GenericWebHook != nil { + { + size, err := m.GenericWebHook.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, 
uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + i -= len(m.Message) + copy(dAtA[i:], m.Message) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Message))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *BuildTriggerPolicy) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *BuildTriggerPolicy) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *BuildTriggerPolicy) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.BitbucketWebHook != nil { + { + size, err := m.BitbucketWebHook.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x32 + } + if m.GitLabWebHook != nil { + { + size, err := m.GitLabWebHook.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x2a + } + if m.ImageChange != nil { + { + size, err := m.ImageChange.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x22 + } + if m.GenericWebHook != nil { + { + size, err := m.GenericWebHook.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a + } + if m.GitHubWebHook != nil { + { + size, err := m.GitHubWebHook.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + i -= len(m.Type) + copy(dAtA[i:], m.Type) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Type))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *CommonSpec) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *CommonSpec) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *CommonSpec) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.NodeSelector != nil { + { + size, err := m.NodeSelector.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x4a + } + if m.CompletionDeadlineSeconds != nil { + i = encodeVarintGenerated(dAtA, i, uint64(*m.CompletionDeadlineSeconds)) + i-- + dAtA[i] = 0x40 + } + { + size, err := m.PostCommit.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x3a + { + size, err := m.Resources.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x32 + { + size, err := m.Output.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x2a + { + size, err := m.Strategy.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, 
i, uint64(size)) + } + i-- + dAtA[i] = 0x22 + if m.Revision != nil { + { + size, err := m.Revision.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a + } + { + size, err := m.Source.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + i -= len(m.ServiceAccount) + copy(dAtA[i:], m.ServiceAccount) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.ServiceAccount))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *CommonWebHookCause) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *CommonWebHookCause) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *CommonWebHookCause) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + i -= len(m.Secret) + copy(dAtA[i:], m.Secret) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Secret))) + i-- + dAtA[i] = 0x12 + if m.Revision != nil { + { + size, err := m.Revision.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *ConfigMapBuildSource) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ConfigMapBuildSource) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ConfigMapBuildSource) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + i -= len(m.DestinationDir) + copy(dAtA[i:], m.DestinationDir) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.DestinationDir))) + i-- + dAtA[i] = 0x12 + { + size, err := m.ConfigMap.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *CustomBuildStrategy) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *CustomBuildStrategy) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *CustomBuildStrategy) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + i -= len(m.BuildAPIVersion) + copy(dAtA[i:], m.BuildAPIVersion) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.BuildAPIVersion))) + i-- + dAtA[i] = 0x3a + if len(m.Secrets) > 0 { + for iNdEx := len(m.Secrets) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Secrets[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x32 + } + } + i-- + if m.ForcePull { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x28 + i-- + if m.ExposeDockerSocket { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x20 + if len(m.Env) > 0 { + for iNdEx := 
len(m.Env) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Env[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a + } + } + if m.PullSecret != nil { + { + size, err := m.PullSecret.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + { + size, err := m.From.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *DockerBuildStrategy) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *DockerBuildStrategy) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *DockerBuildStrategy) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.ImageOptimizationPolicy != nil { + i -= len(*m.ImageOptimizationPolicy) + copy(dAtA[i:], *m.ImageOptimizationPolicy) + i = encodeVarintGenerated(dAtA, i, uint64(len(*m.ImageOptimizationPolicy))) + i-- + dAtA[i] = 0x42 + } + if len(m.BuildArgs) > 0 { + for iNdEx := len(m.BuildArgs) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.BuildArgs[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x3a + } + } + i -= len(m.DockerfilePath) + copy(dAtA[i:], m.DockerfilePath) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.DockerfilePath))) + i-- + dAtA[i] = 0x32 + i-- + if m.ForcePull { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x28 + if len(m.Env) > 0 { + for iNdEx := len(m.Env) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Env[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x22 + } + } + i-- + if m.NoCache { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x18 + if m.PullSecret != nil { + { + size, err := m.PullSecret.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + if m.From != nil { + { + size, err := m.From.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *DockerStrategyOptions) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *DockerStrategyOptions) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *DockerStrategyOptions) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.NoCache != nil { + i-- + if *m.NoCache { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x10 + } + if len(m.BuildArgs) > 0 { + for iNdEx := len(m.BuildArgs) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.BuildArgs[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil 
{ + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + } + } + return len(dAtA) - i, nil +} + +func (m *GenericWebHookCause) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *GenericWebHookCause) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *GenericWebHookCause) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + i -= len(m.Secret) + copy(dAtA[i:], m.Secret) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Secret))) + i-- + dAtA[i] = 0x12 + if m.Revision != nil { + { + size, err := m.Revision.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *GenericWebHookEvent) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *GenericWebHookEvent) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *GenericWebHookEvent) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.DockerStrategyOptions != nil { + { + size, err := m.DockerStrategyOptions.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x22 + } + if len(m.Env) > 0 { + for iNdEx := len(m.Env) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Env[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a + } + } + if m.Git != nil { + { + size, err := m.Git.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + i -= len(m.Type) + copy(dAtA[i:], m.Type) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Type))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *GitBuildSource) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *GitBuildSource) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *GitBuildSource) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + { + size, err := m.ProxyConfig.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a + i -= len(m.Ref) + copy(dAtA[i:], m.Ref) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Ref))) + i-- + dAtA[i] = 0x12 + i -= len(m.URI) + copy(dAtA[i:], m.URI) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.URI))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *GitHubWebHookCause) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + 
return dAtA[:n], nil +} + +func (m *GitHubWebHookCause) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *GitHubWebHookCause) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + i -= len(m.Secret) + copy(dAtA[i:], m.Secret) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Secret))) + i-- + dAtA[i] = 0x12 + if m.Revision != nil { + { + size, err := m.Revision.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *GitInfo) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *GitInfo) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *GitInfo) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.Refs) > 0 { + for iNdEx := len(m.Refs) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Refs[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a + } + } + { + size, err := m.GitSourceRevision.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + { + size, err := m.GitBuildSource.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *GitLabWebHookCause) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *GitLabWebHookCause) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *GitLabWebHookCause) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + { + size, err := m.CommonWebHookCause.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *GitRefInfo) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *GitRefInfo) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *GitRefInfo) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + { + size, err := m.GitSourceRevision.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + { + size, err := m.GitBuildSource.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *GitSourceRevision) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) 
+ n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *GitSourceRevision) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *GitSourceRevision) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + i -= len(m.Message) + copy(dAtA[i:], m.Message) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Message))) + i-- + dAtA[i] = 0x22 + { + size, err := m.Committer.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a + { + size, err := m.Author.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + i -= len(m.Commit) + copy(dAtA[i:], m.Commit) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Commit))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *ImageChangeCause) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ImageChangeCause) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ImageChangeCause) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.FromRef != nil { + { + size, err := m.FromRef.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + i -= len(m.ImageID) + copy(dAtA[i:], m.ImageID) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.ImageID))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *ImageChangeTrigger) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ImageChangeTrigger) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ImageChangeTrigger) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + i-- + if m.Paused { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x18 + if m.From != nil { + { + size, err := m.From.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + i -= len(m.LastTriggeredImageID) + copy(dAtA[i:], m.LastTriggeredImageID) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.LastTriggeredImageID))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *ImageLabel) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ImageLabel) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ImageLabel) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + i -= len(m.Value) + copy(dAtA[i:], m.Value) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Value))) + i-- + dAtA[i] = 0x12 + i -= len(m.Name) + copy(dAtA[i:], m.Name) + i = 
encodeVarintGenerated(dAtA, i, uint64(len(m.Name))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *ImageSource) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ImageSource) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ImageSource) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.As) > 0 { + for iNdEx := len(m.As) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.As[iNdEx]) + copy(dAtA[i:], m.As[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.As[iNdEx]))) + i-- + dAtA[i] = 0x22 + } + } + if m.PullSecret != nil { + { + size, err := m.PullSecret.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a + } + if len(m.Paths) > 0 { + for iNdEx := len(m.Paths) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Paths[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + } + { + size, err := m.From.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *ImageSourcePath) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ImageSourcePath) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ImageSourcePath) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + i -= len(m.DestinationDir) + copy(dAtA[i:], m.DestinationDir) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.DestinationDir))) + i-- + dAtA[i] = 0x12 + i -= len(m.SourcePath) + copy(dAtA[i:], m.SourcePath) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.SourcePath))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *JenkinsPipelineBuildStrategy) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *JenkinsPipelineBuildStrategy) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *JenkinsPipelineBuildStrategy) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.Env) > 0 { + for iNdEx := len(m.Env) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Env[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a + } + } + i -= len(m.Jenkinsfile) + copy(dAtA[i:], m.Jenkinsfile) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Jenkinsfile))) + i-- + dAtA[i] = 0x12 + i -= len(m.JenkinsfilePath) + copy(dAtA[i:], m.JenkinsfilePath) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.JenkinsfilePath))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m OptionalNodeSelector) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA 
= make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m OptionalNodeSelector) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m OptionalNodeSelector) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m) > 0 { + keysForItems := make([]string, 0, len(m)) + for k := range m { + keysForItems = append(keysForItems, string(k)) + } + github_com_gogo_protobuf_sortkeys.Strings(keysForItems) + for iNdEx := len(keysForItems) - 1; iNdEx >= 0; iNdEx-- { + v := m[string(keysForItems[iNdEx])] + baseI := i + i -= len(v) + copy(dAtA[i:], v) + i = encodeVarintGenerated(dAtA, i, uint64(len(v))) + i-- + dAtA[i] = 0x12 + i -= len(keysForItems[iNdEx]) + copy(dAtA[i:], keysForItems[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(keysForItems[iNdEx]))) + i-- + dAtA[i] = 0xa + i = encodeVarintGenerated(dAtA, i, uint64(baseI-i)) + i-- + dAtA[i] = 0xa + } + } + return len(dAtA) - i, nil +} + +func (m *ProxyConfig) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ProxyConfig) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ProxyConfig) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.NoProxy != nil { + i -= len(*m.NoProxy) + copy(dAtA[i:], *m.NoProxy) + i = encodeVarintGenerated(dAtA, i, uint64(len(*m.NoProxy))) + i-- + dAtA[i] = 0x2a + } + if m.HTTPSProxy != nil { + i -= len(*m.HTTPSProxy) + copy(dAtA[i:], *m.HTTPSProxy) + i = encodeVarintGenerated(dAtA, i, uint64(len(*m.HTTPSProxy))) + i-- + dAtA[i] = 0x22 + } + if m.HTTPProxy != nil { + i -= len(*m.HTTPProxy) + copy(dAtA[i:], *m.HTTPProxy) + i = encodeVarintGenerated(dAtA, i, uint64(len(*m.HTTPProxy))) + i-- + dAtA[i] = 0x1a + } + return len(dAtA) - i, nil +} + +func (m *SecretBuildSource) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *SecretBuildSource) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *SecretBuildSource) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + i -= len(m.DestinationDir) + copy(dAtA[i:], m.DestinationDir) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.DestinationDir))) + i-- + dAtA[i] = 0x12 + { + size, err := m.Secret.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *SecretLocalReference) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *SecretLocalReference) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *SecretLocalReference) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + i -= len(m.Name) + copy(dAtA[i:], m.Name) + i = encodeVarintGenerated(dAtA, i, 
uint64(len(m.Name))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *SecretSpec) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *SecretSpec) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *SecretSpec) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + i -= len(m.MountPath) + copy(dAtA[i:], m.MountPath) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.MountPath))) + i-- + dAtA[i] = 0x12 + { + size, err := m.SecretSource.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *SourceBuildStrategy) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *SourceBuildStrategy) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *SourceBuildStrategy) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + i-- + if m.ForcePull { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x30 + if m.Incremental != nil { + i-- + if *m.Incremental { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x28 + } + i -= len(m.Scripts) + copy(dAtA[i:], m.Scripts) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Scripts))) + i-- + dAtA[i] = 0x22 + if len(m.Env) > 0 { + for iNdEx := len(m.Env) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Env[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a + } + } + if m.PullSecret != nil { + { + size, err := m.PullSecret.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + { + size, err := m.From.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *SourceControlUser) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *SourceControlUser) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *SourceControlUser) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + i -= len(m.Email) + copy(dAtA[i:], m.Email) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Email))) + i-- + dAtA[i] = 0x12 + i -= len(m.Name) + copy(dAtA[i:], m.Name) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Name))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *SourceRevision) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *SourceRevision) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + 
return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *SourceRevision) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.Git != nil { + { + size, err := m.Git.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + i -= len(m.Type) + copy(dAtA[i:], m.Type) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Type))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *SourceStrategyOptions) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *SourceStrategyOptions) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *SourceStrategyOptions) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.Incremental != nil { + i-- + if *m.Incremental { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x8 + } + return len(dAtA) - i, nil +} + +func (m *StageInfo) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *StageInfo) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *StageInfo) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.Steps) > 0 { + for iNdEx := len(m.Steps) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Steps[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x22 + } + } + i = encodeVarintGenerated(dAtA, i, uint64(m.DurationMilliseconds)) + i-- + dAtA[i] = 0x18 + { + size, err := m.StartTime.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + i -= len(m.Name) + copy(dAtA[i:], m.Name) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Name))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *StepInfo) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *StepInfo) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *StepInfo) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + i = encodeVarintGenerated(dAtA, i, uint64(m.DurationMilliseconds)) + i-- + dAtA[i] = 0x18 + { + size, err := m.StartTime.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + i -= len(m.Name) + copy(dAtA[i:], m.Name) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Name))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *WebHookTrigger) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *WebHookTrigger) MarshalTo(dAtA []byte) (int, 
error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *WebHookTrigger) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.SecretReference != nil { + { + size, err := m.SecretReference.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a + } + i-- + if m.AllowEnv { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x10 + i -= len(m.Secret) + copy(dAtA[i:], m.Secret) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Secret))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func encodeVarintGenerated(dAtA []byte, offset int, v uint64) int { + offset -= sovGenerated(v) + base := offset + for v >= 1<<7 { + dAtA[offset] = uint8(v&0x7f | 0x80) + v >>= 7 + offset++ + } + dAtA[offset] = uint8(v) + return base +} +func (m *BinaryBuildRequestOptions) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.ObjectMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.AsFile) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Commit) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Message) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.AuthorName) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.AuthorEmail) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.CommitterName) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.CommitterEmail) + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *BinaryBuildSource) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.AsFile) + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *BitbucketWebHookCause) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.CommonWebHookCause.Size() + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *Build) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.ObjectMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = m.Spec.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = m.Status.Size() + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *BuildCondition) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Type) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Status) + n += 1 + l + sovGenerated(uint64(l)) + l = m.LastTransitionTime.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Reason) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Message) + n += 1 + l + sovGenerated(uint64(l)) + l = m.LastUpdateTime.Size() + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *BuildConfig) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.ObjectMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = m.Spec.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = m.Status.Size() + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *BuildConfigList) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.ListMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + if len(m.Items) > 0 { + for _, e := range m.Items { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m *BuildConfigSpec) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.Triggers) > 0 { + for _, e := range m.Triggers { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + l = len(m.RunPolicy) + n += 1 + l + sovGenerated(uint64(l)) + l = 
m.CommonSpec.Size() + n += 1 + l + sovGenerated(uint64(l)) + if m.SuccessfulBuildsHistoryLimit != nil { + n += 1 + sovGenerated(uint64(*m.SuccessfulBuildsHistoryLimit)) + } + if m.FailedBuildsHistoryLimit != nil { + n += 1 + sovGenerated(uint64(*m.FailedBuildsHistoryLimit)) + } + return n +} + +func (m *BuildConfigStatus) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + n += 1 + sovGenerated(uint64(m.LastVersion)) + return n +} + +func (m *BuildList) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.ListMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + if len(m.Items) > 0 { + for _, e := range m.Items { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m *BuildLog) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + return n +} + +func (m *BuildLogOptions) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Container) + n += 1 + l + sovGenerated(uint64(l)) + n += 2 + n += 2 + if m.SinceSeconds != nil { + n += 1 + sovGenerated(uint64(*m.SinceSeconds)) + } + if m.SinceTime != nil { + l = m.SinceTime.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + n += 2 + if m.TailLines != nil { + n += 1 + sovGenerated(uint64(*m.TailLines)) + } + if m.LimitBytes != nil { + n += 1 + sovGenerated(uint64(*m.LimitBytes)) + } + n += 2 + if m.Version != nil { + n += 1 + sovGenerated(uint64(*m.Version)) + } + return n +} + +func (m *BuildOutput) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.To != nil { + l = m.To.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if m.PushSecret != nil { + l = m.PushSecret.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if len(m.ImageLabels) > 0 { + for _, e := range m.ImageLabels { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m *BuildPostCommitSpec) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.Command) > 0 { + for _, s := range m.Command { + l = len(s) + n += 1 + l + sovGenerated(uint64(l)) + } + } + if len(m.Args) > 0 { + for _, s := range m.Args { + l = len(s) + n += 1 + l + sovGenerated(uint64(l)) + } + } + l = len(m.Script) + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *BuildRequest) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.ObjectMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + if m.Revision != nil { + l = m.Revision.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if m.TriggeredByImage != nil { + l = m.TriggeredByImage.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if m.From != nil { + l = m.From.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if m.Binary != nil { + l = m.Binary.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if m.LastVersion != nil { + n += 1 + sovGenerated(uint64(*m.LastVersion)) + } + if len(m.Env) > 0 { + for _, e := range m.Env { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + if len(m.TriggeredBy) > 0 { + for _, e := range m.TriggeredBy { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + if m.DockerStrategyOptions != nil { + l = m.DockerStrategyOptions.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if m.SourceStrategyOptions != nil { + l = m.SourceStrategyOptions.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + return n +} + +func (m *BuildSource) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Type) + n += 1 + l + sovGenerated(uint64(l)) + if m.Binary != nil { + l = 
m.Binary.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if m.Dockerfile != nil { + l = len(*m.Dockerfile) + n += 1 + l + sovGenerated(uint64(l)) + } + if m.Git != nil { + l = m.Git.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if len(m.Images) > 0 { + for _, e := range m.Images { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + l = len(m.ContextDir) + n += 1 + l + sovGenerated(uint64(l)) + if m.SourceSecret != nil { + l = m.SourceSecret.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if len(m.Secrets) > 0 { + for _, e := range m.Secrets { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + if len(m.ConfigMaps) > 0 { + for _, e := range m.ConfigMaps { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m *BuildSpec) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.CommonSpec.Size() + n += 1 + l + sovGenerated(uint64(l)) + if len(m.TriggeredBy) > 0 { + for _, e := range m.TriggeredBy { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m *BuildStatus) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Phase) + n += 1 + l + sovGenerated(uint64(l)) + n += 2 + l = len(m.Reason) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Message) + n += 1 + l + sovGenerated(uint64(l)) + if m.StartTimestamp != nil { + l = m.StartTimestamp.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if m.CompletionTimestamp != nil { + l = m.CompletionTimestamp.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + n += 1 + sovGenerated(uint64(m.Duration)) + l = len(m.OutputDockerImageReference) + n += 1 + l + sovGenerated(uint64(l)) + if m.Config != nil { + l = m.Config.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + l = m.Output.Size() + n += 1 + l + sovGenerated(uint64(l)) + if len(m.Stages) > 0 { + for _, e := range m.Stages { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + l = len(m.LogSnippet) + n += 1 + l + sovGenerated(uint64(l)) + if len(m.Conditions) > 0 { + for _, e := range m.Conditions { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m *BuildStatusOutput) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.To != nil { + l = m.To.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + return n +} + +func (m *BuildStatusOutputTo) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.ImageDigest) + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *BuildStrategy) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Type) + n += 1 + l + sovGenerated(uint64(l)) + if m.DockerStrategy != nil { + l = m.DockerStrategy.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if m.SourceStrategy != nil { + l = m.SourceStrategy.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if m.CustomStrategy != nil { + l = m.CustomStrategy.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if m.JenkinsPipelineStrategy != nil { + l = m.JenkinsPipelineStrategy.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + return n +} + +func (m *BuildTriggerCause) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Message) + n += 1 + l + sovGenerated(uint64(l)) + if m.GenericWebHook != nil { + l = m.GenericWebHook.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if m.GitHubWebHook != nil { + l = m.GitHubWebHook.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if m.ImageChangeBuild != nil { + l = 
m.ImageChangeBuild.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if m.GitLabWebHook != nil { + l = m.GitLabWebHook.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if m.BitbucketWebHook != nil { + l = m.BitbucketWebHook.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + return n +} + +func (m *BuildTriggerPolicy) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Type) + n += 1 + l + sovGenerated(uint64(l)) + if m.GitHubWebHook != nil { + l = m.GitHubWebHook.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if m.GenericWebHook != nil { + l = m.GenericWebHook.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if m.ImageChange != nil { + l = m.ImageChange.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if m.GitLabWebHook != nil { + l = m.GitLabWebHook.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if m.BitbucketWebHook != nil { + l = m.BitbucketWebHook.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + return n +} + +func (m *CommonSpec) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.ServiceAccount) + n += 1 + l + sovGenerated(uint64(l)) + l = m.Source.Size() + n += 1 + l + sovGenerated(uint64(l)) + if m.Revision != nil { + l = m.Revision.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + l = m.Strategy.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = m.Output.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = m.Resources.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = m.PostCommit.Size() + n += 1 + l + sovGenerated(uint64(l)) + if m.CompletionDeadlineSeconds != nil { + n += 1 + sovGenerated(uint64(*m.CompletionDeadlineSeconds)) + } + if m.NodeSelector != nil { + l = m.NodeSelector.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + return n +} + +func (m *CommonWebHookCause) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Revision != nil { + l = m.Revision.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + l = len(m.Secret) + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *ConfigMapBuildSource) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.ConfigMap.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.DestinationDir) + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *CustomBuildStrategy) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.From.Size() + n += 1 + l + sovGenerated(uint64(l)) + if m.PullSecret != nil { + l = m.PullSecret.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if len(m.Env) > 0 { + for _, e := range m.Env { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + n += 2 + n += 2 + if len(m.Secrets) > 0 { + for _, e := range m.Secrets { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + l = len(m.BuildAPIVersion) + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *DockerBuildStrategy) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.From != nil { + l = m.From.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if m.PullSecret != nil { + l = m.PullSecret.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + n += 2 + if len(m.Env) > 0 { + for _, e := range m.Env { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + n += 2 + l = len(m.DockerfilePath) + n += 1 + l + sovGenerated(uint64(l)) + if len(m.BuildArgs) > 0 { + for _, e := range m.BuildArgs { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + if m.ImageOptimizationPolicy != nil { + l = len(*m.ImageOptimizationPolicy) + n += 1 
+ l + sovGenerated(uint64(l)) + } + return n +} + +func (m *DockerStrategyOptions) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.BuildArgs) > 0 { + for _, e := range m.BuildArgs { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + if m.NoCache != nil { + n += 2 + } + return n +} + +func (m *GenericWebHookCause) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Revision != nil { + l = m.Revision.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + l = len(m.Secret) + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *GenericWebHookEvent) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Type) + n += 1 + l + sovGenerated(uint64(l)) + if m.Git != nil { + l = m.Git.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if len(m.Env) > 0 { + for _, e := range m.Env { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + if m.DockerStrategyOptions != nil { + l = m.DockerStrategyOptions.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + return n +} + +func (m *GitBuildSource) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.URI) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Ref) + n += 1 + l + sovGenerated(uint64(l)) + l = m.ProxyConfig.Size() + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *GitHubWebHookCause) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Revision != nil { + l = m.Revision.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + l = len(m.Secret) + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *GitInfo) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.GitBuildSource.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = m.GitSourceRevision.Size() + n += 1 + l + sovGenerated(uint64(l)) + if len(m.Refs) > 0 { + for _, e := range m.Refs { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m *GitLabWebHookCause) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.CommonWebHookCause.Size() + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *GitRefInfo) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.GitBuildSource.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = m.GitSourceRevision.Size() + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *GitSourceRevision) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Commit) + n += 1 + l + sovGenerated(uint64(l)) + l = m.Author.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = m.Committer.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Message) + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *ImageChangeCause) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.ImageID) + n += 1 + l + sovGenerated(uint64(l)) + if m.FromRef != nil { + l = m.FromRef.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + return n +} + +func (m *ImageChangeTrigger) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.LastTriggeredImageID) + n += 1 + l + sovGenerated(uint64(l)) + if m.From != nil { + l = m.From.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + n += 2 + return n +} + +func (m *ImageLabel) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Name) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Value) + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m 
*ImageSource) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.From.Size() + n += 1 + l + sovGenerated(uint64(l)) + if len(m.Paths) > 0 { + for _, e := range m.Paths { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + if m.PullSecret != nil { + l = m.PullSecret.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if len(m.As) > 0 { + for _, s := range m.As { + l = len(s) + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m *ImageSourcePath) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.SourcePath) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.DestinationDir) + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *JenkinsPipelineBuildStrategy) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.JenkinsfilePath) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Jenkinsfile) + n += 1 + l + sovGenerated(uint64(l)) + if len(m.Env) > 0 { + for _, e := range m.Env { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m OptionalNodeSelector) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m) > 0 { + for k, v := range m { + _ = k + _ = v + mapEntrySize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + len(v) + sovGenerated(uint64(len(v))) + n += mapEntrySize + 1 + sovGenerated(uint64(mapEntrySize)) + } + } + return n +} + +func (m *ProxyConfig) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.HTTPProxy != nil { + l = len(*m.HTTPProxy) + n += 1 + l + sovGenerated(uint64(l)) + } + if m.HTTPSProxy != nil { + l = len(*m.HTTPSProxy) + n += 1 + l + sovGenerated(uint64(l)) + } + if m.NoProxy != nil { + l = len(*m.NoProxy) + n += 1 + l + sovGenerated(uint64(l)) + } + return n +} + +func (m *SecretBuildSource) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.Secret.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.DestinationDir) + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *SecretLocalReference) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Name) + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *SecretSpec) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.SecretSource.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.MountPath) + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *SourceBuildStrategy) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.From.Size() + n += 1 + l + sovGenerated(uint64(l)) + if m.PullSecret != nil { + l = m.PullSecret.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if len(m.Env) > 0 { + for _, e := range m.Env { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + l = len(m.Scripts) + n += 1 + l + sovGenerated(uint64(l)) + if m.Incremental != nil { + n += 2 + } + n += 2 + return n +} + +func (m *SourceControlUser) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Name) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Email) + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *SourceRevision) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Type) + n += 1 + l + sovGenerated(uint64(l)) + if m.Git != nil { + l = m.Git.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + return n +} + +func (m *SourceStrategyOptions) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Incremental 
!= nil { + n += 2 + } + return n +} + +func (m *StageInfo) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Name) + n += 1 + l + sovGenerated(uint64(l)) + l = m.StartTime.Size() + n += 1 + l + sovGenerated(uint64(l)) + n += 1 + sovGenerated(uint64(m.DurationMilliseconds)) + if len(m.Steps) > 0 { + for _, e := range m.Steps { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m *StepInfo) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Name) + n += 1 + l + sovGenerated(uint64(l)) + l = m.StartTime.Size() + n += 1 + l + sovGenerated(uint64(l)) + n += 1 + sovGenerated(uint64(m.DurationMilliseconds)) + return n +} + +func (m *WebHookTrigger) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Secret) + n += 1 + l + sovGenerated(uint64(l)) + n += 2 + if m.SecretReference != nil { + l = m.SecretReference.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + return n +} + +func sovGenerated(x uint64) (n int) { + return (math_bits.Len64(x|1) + 6) / 7 +} +func sozGenerated(x uint64) (n int) { + return sovGenerated(uint64((x << 1) ^ uint64((int64(x) >> 63)))) +} +func (this *BinaryBuildRequestOptions) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&BinaryBuildRequestOptions{`, + `ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v1.ObjectMeta", 1), `&`, ``, 1) + `,`, + `AsFile:` + fmt.Sprintf("%v", this.AsFile) + `,`, + `Commit:` + fmt.Sprintf("%v", this.Commit) + `,`, + `Message:` + fmt.Sprintf("%v", this.Message) + `,`, + `AuthorName:` + fmt.Sprintf("%v", this.AuthorName) + `,`, + `AuthorEmail:` + fmt.Sprintf("%v", this.AuthorEmail) + `,`, + `CommitterName:` + fmt.Sprintf("%v", this.CommitterName) + `,`, + `CommitterEmail:` + fmt.Sprintf("%v", this.CommitterEmail) + `,`, + `}`, + }, "") + return s +} +func (this *BinaryBuildSource) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&BinaryBuildSource{`, + `AsFile:` + fmt.Sprintf("%v", this.AsFile) + `,`, + `}`, + }, "") + return s +} +func (this *BitbucketWebHookCause) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&BitbucketWebHookCause{`, + `CommonWebHookCause:` + strings.Replace(strings.Replace(this.CommonWebHookCause.String(), "CommonWebHookCause", "CommonWebHookCause", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *Build) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&Build{`, + `ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v1.ObjectMeta", 1), `&`, ``, 1) + `,`, + `Spec:` + strings.Replace(strings.Replace(this.Spec.String(), "BuildSpec", "BuildSpec", 1), `&`, ``, 1) + `,`, + `Status:` + strings.Replace(strings.Replace(this.Status.String(), "BuildStatus", "BuildStatus", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *BuildCondition) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&BuildCondition{`, + `Type:` + fmt.Sprintf("%v", this.Type) + `,`, + `Status:` + fmt.Sprintf("%v", this.Status) + `,`, + `LastTransitionTime:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.LastTransitionTime), "Time", "v1.Time", 1), `&`, ``, 1) + `,`, + `Reason:` + fmt.Sprintf("%v", this.Reason) + `,`, + `Message:` + fmt.Sprintf("%v", this.Message) + `,`, + `LastUpdateTime:` + 
strings.Replace(strings.Replace(fmt.Sprintf("%v", this.LastUpdateTime), "Time", "v1.Time", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *BuildConfig) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&BuildConfig{`, + `ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v1.ObjectMeta", 1), `&`, ``, 1) + `,`, + `Spec:` + strings.Replace(strings.Replace(this.Spec.String(), "BuildConfigSpec", "BuildConfigSpec", 1), `&`, ``, 1) + `,`, + `Status:` + strings.Replace(strings.Replace(this.Status.String(), "BuildConfigStatus", "BuildConfigStatus", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *BuildConfigList) String() string { + if this == nil { + return "nil" + } + repeatedStringForItems := "[]BuildConfig{" + for _, f := range this.Items { + repeatedStringForItems += strings.Replace(strings.Replace(f.String(), "BuildConfig", "BuildConfig", 1), `&`, ``, 1) + "," + } + repeatedStringForItems += "}" + s := strings.Join([]string{`&BuildConfigList{`, + `ListMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ListMeta), "ListMeta", "v1.ListMeta", 1), `&`, ``, 1) + `,`, + `Items:` + repeatedStringForItems + `,`, + `}`, + }, "") + return s +} +func (this *BuildConfigSpec) String() string { + if this == nil { + return "nil" + } + repeatedStringForTriggers := "[]BuildTriggerPolicy{" + for _, f := range this.Triggers { + repeatedStringForTriggers += strings.Replace(strings.Replace(f.String(), "BuildTriggerPolicy", "BuildTriggerPolicy", 1), `&`, ``, 1) + "," + } + repeatedStringForTriggers += "}" + s := strings.Join([]string{`&BuildConfigSpec{`, + `Triggers:` + repeatedStringForTriggers + `,`, + `RunPolicy:` + fmt.Sprintf("%v", this.RunPolicy) + `,`, + `CommonSpec:` + strings.Replace(strings.Replace(this.CommonSpec.String(), "CommonSpec", "CommonSpec", 1), `&`, ``, 1) + `,`, + `SuccessfulBuildsHistoryLimit:` + valueToStringGenerated(this.SuccessfulBuildsHistoryLimit) + `,`, + `FailedBuildsHistoryLimit:` + valueToStringGenerated(this.FailedBuildsHistoryLimit) + `,`, + `}`, + }, "") + return s +} +func (this *BuildConfigStatus) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&BuildConfigStatus{`, + `LastVersion:` + fmt.Sprintf("%v", this.LastVersion) + `,`, + `}`, + }, "") + return s +} +func (this *BuildList) String() string { + if this == nil { + return "nil" + } + repeatedStringForItems := "[]Build{" + for _, f := range this.Items { + repeatedStringForItems += strings.Replace(strings.Replace(f.String(), "Build", "Build", 1), `&`, ``, 1) + "," + } + repeatedStringForItems += "}" + s := strings.Join([]string{`&BuildList{`, + `ListMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ListMeta), "ListMeta", "v1.ListMeta", 1), `&`, ``, 1) + `,`, + `Items:` + repeatedStringForItems + `,`, + `}`, + }, "") + return s +} +func (this *BuildLog) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&BuildLog{`, + `}`, + }, "") + return s +} +func (this *BuildLogOptions) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&BuildLogOptions{`, + `Container:` + fmt.Sprintf("%v", this.Container) + `,`, + `Follow:` + fmt.Sprintf("%v", this.Follow) + `,`, + `Previous:` + fmt.Sprintf("%v", this.Previous) + `,`, + `SinceSeconds:` + valueToStringGenerated(this.SinceSeconds) + `,`, + `SinceTime:` + strings.Replace(fmt.Sprintf("%v", this.SinceTime), "Time", "v1.Time", 1) + `,`, + 
`Timestamps:` + fmt.Sprintf("%v", this.Timestamps) + `,`, + `TailLines:` + valueToStringGenerated(this.TailLines) + `,`, + `LimitBytes:` + valueToStringGenerated(this.LimitBytes) + `,`, + `NoWait:` + fmt.Sprintf("%v", this.NoWait) + `,`, + `Version:` + valueToStringGenerated(this.Version) + `,`, + `}`, + }, "") + return s +} +func (this *BuildOutput) String() string { + if this == nil { + return "nil" + } + repeatedStringForImageLabels := "[]ImageLabel{" + for _, f := range this.ImageLabels { + repeatedStringForImageLabels += strings.Replace(strings.Replace(f.String(), "ImageLabel", "ImageLabel", 1), `&`, ``, 1) + "," + } + repeatedStringForImageLabels += "}" + s := strings.Join([]string{`&BuildOutput{`, + `To:` + strings.Replace(fmt.Sprintf("%v", this.To), "ObjectReference", "v11.ObjectReference", 1) + `,`, + `PushSecret:` + strings.Replace(fmt.Sprintf("%v", this.PushSecret), "LocalObjectReference", "v11.LocalObjectReference", 1) + `,`, + `ImageLabels:` + repeatedStringForImageLabels + `,`, + `}`, + }, "") + return s +} +func (this *BuildPostCommitSpec) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&BuildPostCommitSpec{`, + `Command:` + fmt.Sprintf("%v", this.Command) + `,`, + `Args:` + fmt.Sprintf("%v", this.Args) + `,`, + `Script:` + fmt.Sprintf("%v", this.Script) + `,`, + `}`, + }, "") + return s +} +func (this *BuildRequest) String() string { + if this == nil { + return "nil" + } + repeatedStringForEnv := "[]EnvVar{" + for _, f := range this.Env { + repeatedStringForEnv += fmt.Sprintf("%v", f) + "," + } + repeatedStringForEnv += "}" + repeatedStringForTriggeredBy := "[]BuildTriggerCause{" + for _, f := range this.TriggeredBy { + repeatedStringForTriggeredBy += strings.Replace(strings.Replace(f.String(), "BuildTriggerCause", "BuildTriggerCause", 1), `&`, ``, 1) + "," + } + repeatedStringForTriggeredBy += "}" + s := strings.Join([]string{`&BuildRequest{`, + `ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v1.ObjectMeta", 1), `&`, ``, 1) + `,`, + `Revision:` + strings.Replace(this.Revision.String(), "SourceRevision", "SourceRevision", 1) + `,`, + `TriggeredByImage:` + strings.Replace(fmt.Sprintf("%v", this.TriggeredByImage), "ObjectReference", "v11.ObjectReference", 1) + `,`, + `From:` + strings.Replace(fmt.Sprintf("%v", this.From), "ObjectReference", "v11.ObjectReference", 1) + `,`, + `Binary:` + strings.Replace(this.Binary.String(), "BinaryBuildSource", "BinaryBuildSource", 1) + `,`, + `LastVersion:` + valueToStringGenerated(this.LastVersion) + `,`, + `Env:` + repeatedStringForEnv + `,`, + `TriggeredBy:` + repeatedStringForTriggeredBy + `,`, + `DockerStrategyOptions:` + strings.Replace(this.DockerStrategyOptions.String(), "DockerStrategyOptions", "DockerStrategyOptions", 1) + `,`, + `SourceStrategyOptions:` + strings.Replace(this.SourceStrategyOptions.String(), "SourceStrategyOptions", "SourceStrategyOptions", 1) + `,`, + `}`, + }, "") + return s +} +func (this *BuildSource) String() string { + if this == nil { + return "nil" + } + repeatedStringForImages := "[]ImageSource{" + for _, f := range this.Images { + repeatedStringForImages += strings.Replace(strings.Replace(f.String(), "ImageSource", "ImageSource", 1), `&`, ``, 1) + "," + } + repeatedStringForImages += "}" + repeatedStringForSecrets := "[]SecretBuildSource{" + for _, f := range this.Secrets { + repeatedStringForSecrets += strings.Replace(strings.Replace(f.String(), "SecretBuildSource", "SecretBuildSource", 1), `&`, ``, 1) + "," + } + 
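// NOTE (editorial sketch, not part of the vendored patch): the generated
// String() methods in these hunks all follow one gogo/protobuf pattern:
// repeated fields are pre-rendered into a `repeatedStringForX` accumulator,
// embedded structs are rendered via their own String() with a
// strings.Replace pass stripping the leading `&`, and the pieces are joined
// with strings.Join. A minimal self-contained illustration of the same
// shape (Item and Box are hypothetical types, not from the build/v1 API):

package main

import (
	"fmt"
	"strings"
)

type Item struct{ Name string }

func (i *Item) String() string {
	return strings.Join([]string{`&Item{`,
		`Name:` + fmt.Sprintf("%v", i.Name) + `,`,
		`}`,
	}, "")
}

type Box struct{ Items []Item }

func (b *Box) String() string {
	if b == nil {
		return "nil"
	}
	// Same accumulator shape as repeatedStringForItems in the generated code.
	repeatedStringForItems := "[]Item{"
	for _, f := range b.Items {
		// Drop the leading & so embedded values print as Item{...}, not &Item{...}.
		repeatedStringForItems += strings.Replace(f.String(), `&`, ``, 1) + ","
	}
	repeatedStringForItems += "}"
	return strings.Join([]string{`&Box{`,
		`Items:` + repeatedStringForItems + `,`,
		`}`,
	}, "")
}

func main() {
	b := &Box{Items: []Item{{Name: "a"}, {Name: "b"}}}
	fmt.Println(b) // &Box{Items:[]Item{Item{Name:a,},Item{Name:b,},},}
}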
repeatedStringForSecrets += "}" + repeatedStringForConfigMaps := "[]ConfigMapBuildSource{" + for _, f := range this.ConfigMaps { + repeatedStringForConfigMaps += strings.Replace(strings.Replace(f.String(), "ConfigMapBuildSource", "ConfigMapBuildSource", 1), `&`, ``, 1) + "," + } + repeatedStringForConfigMaps += "}" + s := strings.Join([]string{`&BuildSource{`, + `Type:` + fmt.Sprintf("%v", this.Type) + `,`, + `Binary:` + strings.Replace(this.Binary.String(), "BinaryBuildSource", "BinaryBuildSource", 1) + `,`, + `Dockerfile:` + valueToStringGenerated(this.Dockerfile) + `,`, + `Git:` + strings.Replace(this.Git.String(), "GitBuildSource", "GitBuildSource", 1) + `,`, + `Images:` + repeatedStringForImages + `,`, + `ContextDir:` + fmt.Sprintf("%v", this.ContextDir) + `,`, + `SourceSecret:` + strings.Replace(fmt.Sprintf("%v", this.SourceSecret), "LocalObjectReference", "v11.LocalObjectReference", 1) + `,`, + `Secrets:` + repeatedStringForSecrets + `,`, + `ConfigMaps:` + repeatedStringForConfigMaps + `,`, + `}`, + }, "") + return s +} +func (this *BuildSpec) String() string { + if this == nil { + return "nil" + } + repeatedStringForTriggeredBy := "[]BuildTriggerCause{" + for _, f := range this.TriggeredBy { + repeatedStringForTriggeredBy += strings.Replace(strings.Replace(f.String(), "BuildTriggerCause", "BuildTriggerCause", 1), `&`, ``, 1) + "," + } + repeatedStringForTriggeredBy += "}" + s := strings.Join([]string{`&BuildSpec{`, + `CommonSpec:` + strings.Replace(strings.Replace(this.CommonSpec.String(), "CommonSpec", "CommonSpec", 1), `&`, ``, 1) + `,`, + `TriggeredBy:` + repeatedStringForTriggeredBy + `,`, + `}`, + }, "") + return s +} +func (this *BuildStatus) String() string { + if this == nil { + return "nil" + } + repeatedStringForStages := "[]StageInfo{" + for _, f := range this.Stages { + repeatedStringForStages += strings.Replace(strings.Replace(f.String(), "StageInfo", "StageInfo", 1), `&`, ``, 1) + "," + } + repeatedStringForStages += "}" + repeatedStringForConditions := "[]BuildCondition{" + for _, f := range this.Conditions { + repeatedStringForConditions += strings.Replace(strings.Replace(f.String(), "BuildCondition", "BuildCondition", 1), `&`, ``, 1) + "," + } + repeatedStringForConditions += "}" + s := strings.Join([]string{`&BuildStatus{`, + `Phase:` + fmt.Sprintf("%v", this.Phase) + `,`, + `Cancelled:` + fmt.Sprintf("%v", this.Cancelled) + `,`, + `Reason:` + fmt.Sprintf("%v", this.Reason) + `,`, + `Message:` + fmt.Sprintf("%v", this.Message) + `,`, + `StartTimestamp:` + strings.Replace(fmt.Sprintf("%v", this.StartTimestamp), "Time", "v1.Time", 1) + `,`, + `CompletionTimestamp:` + strings.Replace(fmt.Sprintf("%v", this.CompletionTimestamp), "Time", "v1.Time", 1) + `,`, + `Duration:` + fmt.Sprintf("%v", this.Duration) + `,`, + `OutputDockerImageReference:` + fmt.Sprintf("%v", this.OutputDockerImageReference) + `,`, + `Config:` + strings.Replace(fmt.Sprintf("%v", this.Config), "ObjectReference", "v11.ObjectReference", 1) + `,`, + `Output:` + strings.Replace(strings.Replace(this.Output.String(), "BuildStatusOutput", "BuildStatusOutput", 1), `&`, ``, 1) + `,`, + `Stages:` + repeatedStringForStages + `,`, + `LogSnippet:` + fmt.Sprintf("%v", this.LogSnippet) + `,`, + `Conditions:` + repeatedStringForConditions + `,`, + `}`, + }, "") + return s +} +func (this *BuildStatusOutput) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&BuildStatusOutput{`, + `To:` + strings.Replace(this.To.String(), "BuildStatusOutputTo", "BuildStatusOutputTo", 1) + `,`, + 
`}`, + }, "") + return s +} +func (this *BuildStatusOutputTo) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&BuildStatusOutputTo{`, + `ImageDigest:` + fmt.Sprintf("%v", this.ImageDigest) + `,`, + `}`, + }, "") + return s +} +func (this *BuildStrategy) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&BuildStrategy{`, + `Type:` + fmt.Sprintf("%v", this.Type) + `,`, + `DockerStrategy:` + strings.Replace(this.DockerStrategy.String(), "DockerBuildStrategy", "DockerBuildStrategy", 1) + `,`, + `SourceStrategy:` + strings.Replace(this.SourceStrategy.String(), "SourceBuildStrategy", "SourceBuildStrategy", 1) + `,`, + `CustomStrategy:` + strings.Replace(this.CustomStrategy.String(), "CustomBuildStrategy", "CustomBuildStrategy", 1) + `,`, + `JenkinsPipelineStrategy:` + strings.Replace(this.JenkinsPipelineStrategy.String(), "JenkinsPipelineBuildStrategy", "JenkinsPipelineBuildStrategy", 1) + `,`, + `}`, + }, "") + return s +} +func (this *BuildTriggerCause) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&BuildTriggerCause{`, + `Message:` + fmt.Sprintf("%v", this.Message) + `,`, + `GenericWebHook:` + strings.Replace(this.GenericWebHook.String(), "GenericWebHookCause", "GenericWebHookCause", 1) + `,`, + `GitHubWebHook:` + strings.Replace(this.GitHubWebHook.String(), "GitHubWebHookCause", "GitHubWebHookCause", 1) + `,`, + `ImageChangeBuild:` + strings.Replace(this.ImageChangeBuild.String(), "ImageChangeCause", "ImageChangeCause", 1) + `,`, + `GitLabWebHook:` + strings.Replace(this.GitLabWebHook.String(), "GitLabWebHookCause", "GitLabWebHookCause", 1) + `,`, + `BitbucketWebHook:` + strings.Replace(this.BitbucketWebHook.String(), "BitbucketWebHookCause", "BitbucketWebHookCause", 1) + `,`, + `}`, + }, "") + return s +} +func (this *BuildTriggerPolicy) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&BuildTriggerPolicy{`, + `Type:` + fmt.Sprintf("%v", this.Type) + `,`, + `GitHubWebHook:` + strings.Replace(this.GitHubWebHook.String(), "WebHookTrigger", "WebHookTrigger", 1) + `,`, + `GenericWebHook:` + strings.Replace(this.GenericWebHook.String(), "WebHookTrigger", "WebHookTrigger", 1) + `,`, + `ImageChange:` + strings.Replace(this.ImageChange.String(), "ImageChangeTrigger", "ImageChangeTrigger", 1) + `,`, + `GitLabWebHook:` + strings.Replace(this.GitLabWebHook.String(), "WebHookTrigger", "WebHookTrigger", 1) + `,`, + `BitbucketWebHook:` + strings.Replace(this.BitbucketWebHook.String(), "WebHookTrigger", "WebHookTrigger", 1) + `,`, + `}`, + }, "") + return s +} +func (this *CommonSpec) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&CommonSpec{`, + `ServiceAccount:` + fmt.Sprintf("%v", this.ServiceAccount) + `,`, + `Source:` + strings.Replace(strings.Replace(this.Source.String(), "BuildSource", "BuildSource", 1), `&`, ``, 1) + `,`, + `Revision:` + strings.Replace(this.Revision.String(), "SourceRevision", "SourceRevision", 1) + `,`, + `Strategy:` + strings.Replace(strings.Replace(this.Strategy.String(), "BuildStrategy", "BuildStrategy", 1), `&`, ``, 1) + `,`, + `Output:` + strings.Replace(strings.Replace(this.Output.String(), "BuildOutput", "BuildOutput", 1), `&`, ``, 1) + `,`, + `Resources:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Resources), "ResourceRequirements", "v11.ResourceRequirements", 1), `&`, ``, 1) + `,`, + `PostCommit:` + strings.Replace(strings.Replace(this.PostCommit.String(), 
"BuildPostCommitSpec", "BuildPostCommitSpec", 1), `&`, ``, 1) + `,`, + `CompletionDeadlineSeconds:` + valueToStringGenerated(this.CompletionDeadlineSeconds) + `,`, + `NodeSelector:` + strings.Replace(fmt.Sprintf("%v", this.NodeSelector), "OptionalNodeSelector", "OptionalNodeSelector", 1) + `,`, + `}`, + }, "") + return s +} +func (this *CommonWebHookCause) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&CommonWebHookCause{`, + `Revision:` + strings.Replace(this.Revision.String(), "SourceRevision", "SourceRevision", 1) + `,`, + `Secret:` + fmt.Sprintf("%v", this.Secret) + `,`, + `}`, + }, "") + return s +} +func (this *ConfigMapBuildSource) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&ConfigMapBuildSource{`, + `ConfigMap:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ConfigMap), "LocalObjectReference", "v11.LocalObjectReference", 1), `&`, ``, 1) + `,`, + `DestinationDir:` + fmt.Sprintf("%v", this.DestinationDir) + `,`, + `}`, + }, "") + return s +} +func (this *CustomBuildStrategy) String() string { + if this == nil { + return "nil" + } + repeatedStringForEnv := "[]EnvVar{" + for _, f := range this.Env { + repeatedStringForEnv += fmt.Sprintf("%v", f) + "," + } + repeatedStringForEnv += "}" + repeatedStringForSecrets := "[]SecretSpec{" + for _, f := range this.Secrets { + repeatedStringForSecrets += strings.Replace(strings.Replace(f.String(), "SecretSpec", "SecretSpec", 1), `&`, ``, 1) + "," + } + repeatedStringForSecrets += "}" + s := strings.Join([]string{`&CustomBuildStrategy{`, + `From:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.From), "ObjectReference", "v11.ObjectReference", 1), `&`, ``, 1) + `,`, + `PullSecret:` + strings.Replace(fmt.Sprintf("%v", this.PullSecret), "LocalObjectReference", "v11.LocalObjectReference", 1) + `,`, + `Env:` + repeatedStringForEnv + `,`, + `ExposeDockerSocket:` + fmt.Sprintf("%v", this.ExposeDockerSocket) + `,`, + `ForcePull:` + fmt.Sprintf("%v", this.ForcePull) + `,`, + `Secrets:` + repeatedStringForSecrets + `,`, + `BuildAPIVersion:` + fmt.Sprintf("%v", this.BuildAPIVersion) + `,`, + `}`, + }, "") + return s +} +func (this *DockerBuildStrategy) String() string { + if this == nil { + return "nil" + } + repeatedStringForEnv := "[]EnvVar{" + for _, f := range this.Env { + repeatedStringForEnv += fmt.Sprintf("%v", f) + "," + } + repeatedStringForEnv += "}" + repeatedStringForBuildArgs := "[]EnvVar{" + for _, f := range this.BuildArgs { + repeatedStringForBuildArgs += fmt.Sprintf("%v", f) + "," + } + repeatedStringForBuildArgs += "}" + s := strings.Join([]string{`&DockerBuildStrategy{`, + `From:` + strings.Replace(fmt.Sprintf("%v", this.From), "ObjectReference", "v11.ObjectReference", 1) + `,`, + `PullSecret:` + strings.Replace(fmt.Sprintf("%v", this.PullSecret), "LocalObjectReference", "v11.LocalObjectReference", 1) + `,`, + `NoCache:` + fmt.Sprintf("%v", this.NoCache) + `,`, + `Env:` + repeatedStringForEnv + `,`, + `ForcePull:` + fmt.Sprintf("%v", this.ForcePull) + `,`, + `DockerfilePath:` + fmt.Sprintf("%v", this.DockerfilePath) + `,`, + `BuildArgs:` + repeatedStringForBuildArgs + `,`, + `ImageOptimizationPolicy:` + valueToStringGenerated(this.ImageOptimizationPolicy) + `,`, + `}`, + }, "") + return s +} +func (this *DockerStrategyOptions) String() string { + if this == nil { + return "nil" + } + repeatedStringForBuildArgs := "[]EnvVar{" + for _, f := range this.BuildArgs { + repeatedStringForBuildArgs += fmt.Sprintf("%v", f) + "," + } + 
repeatedStringForBuildArgs += "}" + s := strings.Join([]string{`&DockerStrategyOptions{`, + `BuildArgs:` + repeatedStringForBuildArgs + `,`, + `NoCache:` + valueToStringGenerated(this.NoCache) + `,`, + `}`, + }, "") + return s +} +func (this *GenericWebHookCause) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&GenericWebHookCause{`, + `Revision:` + strings.Replace(this.Revision.String(), "SourceRevision", "SourceRevision", 1) + `,`, + `Secret:` + fmt.Sprintf("%v", this.Secret) + `,`, + `}`, + }, "") + return s +} +func (this *GenericWebHookEvent) String() string { + if this == nil { + return "nil" + } + repeatedStringForEnv := "[]EnvVar{" + for _, f := range this.Env { + repeatedStringForEnv += fmt.Sprintf("%v", f) + "," + } + repeatedStringForEnv += "}" + s := strings.Join([]string{`&GenericWebHookEvent{`, + `Type:` + fmt.Sprintf("%v", this.Type) + `,`, + `Git:` + strings.Replace(this.Git.String(), "GitInfo", "GitInfo", 1) + `,`, + `Env:` + repeatedStringForEnv + `,`, + `DockerStrategyOptions:` + strings.Replace(this.DockerStrategyOptions.String(), "DockerStrategyOptions", "DockerStrategyOptions", 1) + `,`, + `}`, + }, "") + return s +} +func (this *GitBuildSource) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&GitBuildSource{`, + `URI:` + fmt.Sprintf("%v", this.URI) + `,`, + `Ref:` + fmt.Sprintf("%v", this.Ref) + `,`, + `ProxyConfig:` + strings.Replace(strings.Replace(this.ProxyConfig.String(), "ProxyConfig", "ProxyConfig", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *GitHubWebHookCause) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&GitHubWebHookCause{`, + `Revision:` + strings.Replace(this.Revision.String(), "SourceRevision", "SourceRevision", 1) + `,`, + `Secret:` + fmt.Sprintf("%v", this.Secret) + `,`, + `}`, + }, "") + return s +} +func (this *GitInfo) String() string { + if this == nil { + return "nil" + } + repeatedStringForRefs := "[]GitRefInfo{" + for _, f := range this.Refs { + repeatedStringForRefs += strings.Replace(strings.Replace(f.String(), "GitRefInfo", "GitRefInfo", 1), `&`, ``, 1) + "," + } + repeatedStringForRefs += "}" + s := strings.Join([]string{`&GitInfo{`, + `GitBuildSource:` + strings.Replace(strings.Replace(this.GitBuildSource.String(), "GitBuildSource", "GitBuildSource", 1), `&`, ``, 1) + `,`, + `GitSourceRevision:` + strings.Replace(strings.Replace(this.GitSourceRevision.String(), "GitSourceRevision", "GitSourceRevision", 1), `&`, ``, 1) + `,`, + `Refs:` + repeatedStringForRefs + `,`, + `}`, + }, "") + return s +} +func (this *GitLabWebHookCause) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&GitLabWebHookCause{`, + `CommonWebHookCause:` + strings.Replace(strings.Replace(this.CommonWebHookCause.String(), "CommonWebHookCause", "CommonWebHookCause", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *GitRefInfo) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&GitRefInfo{`, + `GitBuildSource:` + strings.Replace(strings.Replace(this.GitBuildSource.String(), "GitBuildSource", "GitBuildSource", 1), `&`, ``, 1) + `,`, + `GitSourceRevision:` + strings.Replace(strings.Replace(this.GitSourceRevision.String(), "GitSourceRevision", "GitSourceRevision", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *GitSourceRevision) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&GitSourceRevision{`, + 
`Commit:` + fmt.Sprintf("%v", this.Commit) + `,`, + `Author:` + strings.Replace(strings.Replace(this.Author.String(), "SourceControlUser", "SourceControlUser", 1), `&`, ``, 1) + `,`, + `Committer:` + strings.Replace(strings.Replace(this.Committer.String(), "SourceControlUser", "SourceControlUser", 1), `&`, ``, 1) + `,`, + `Message:` + fmt.Sprintf("%v", this.Message) + `,`, + `}`, + }, "") + return s +} +func (this *ImageChangeCause) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&ImageChangeCause{`, + `ImageID:` + fmt.Sprintf("%v", this.ImageID) + `,`, + `FromRef:` + strings.Replace(fmt.Sprintf("%v", this.FromRef), "ObjectReference", "v11.ObjectReference", 1) + `,`, + `}`, + }, "") + return s +} +func (this *ImageChangeTrigger) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&ImageChangeTrigger{`, + `LastTriggeredImageID:` + fmt.Sprintf("%v", this.LastTriggeredImageID) + `,`, + `From:` + strings.Replace(fmt.Sprintf("%v", this.From), "ObjectReference", "v11.ObjectReference", 1) + `,`, + `Paused:` + fmt.Sprintf("%v", this.Paused) + `,`, + `}`, + }, "") + return s +} +func (this *ImageLabel) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&ImageLabel{`, + `Name:` + fmt.Sprintf("%v", this.Name) + `,`, + `Value:` + fmt.Sprintf("%v", this.Value) + `,`, + `}`, + }, "") + return s +} +func (this *ImageSource) String() string { + if this == nil { + return "nil" + } + repeatedStringForPaths := "[]ImageSourcePath{" + for _, f := range this.Paths { + repeatedStringForPaths += strings.Replace(strings.Replace(f.String(), "ImageSourcePath", "ImageSourcePath", 1), `&`, ``, 1) + "," + } + repeatedStringForPaths += "}" + s := strings.Join([]string{`&ImageSource{`, + `From:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.From), "ObjectReference", "v11.ObjectReference", 1), `&`, ``, 1) + `,`, + `Paths:` + repeatedStringForPaths + `,`, + `PullSecret:` + strings.Replace(fmt.Sprintf("%v", this.PullSecret), "LocalObjectReference", "v11.LocalObjectReference", 1) + `,`, + `As:` + fmt.Sprintf("%v", this.As) + `,`, + `}`, + }, "") + return s +} +func (this *ImageSourcePath) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&ImageSourcePath{`, + `SourcePath:` + fmt.Sprintf("%v", this.SourcePath) + `,`, + `DestinationDir:` + fmt.Sprintf("%v", this.DestinationDir) + `,`, + `}`, + }, "") + return s +} +func (this *JenkinsPipelineBuildStrategy) String() string { + if this == nil { + return "nil" + } + repeatedStringForEnv := "[]EnvVar{" + for _, f := range this.Env { + repeatedStringForEnv += fmt.Sprintf("%v", f) + "," + } + repeatedStringForEnv += "}" + s := strings.Join([]string{`&JenkinsPipelineBuildStrategy{`, + `JenkinsfilePath:` + fmt.Sprintf("%v", this.JenkinsfilePath) + `,`, + `Jenkinsfile:` + fmt.Sprintf("%v", this.Jenkinsfile) + `,`, + `Env:` + repeatedStringForEnv + `,`, + `}`, + }, "") + return s +} +func (this *ProxyConfig) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&ProxyConfig{`, + `HTTPProxy:` + valueToStringGenerated(this.HTTPProxy) + `,`, + `HTTPSProxy:` + valueToStringGenerated(this.HTTPSProxy) + `,`, + `NoProxy:` + valueToStringGenerated(this.NoProxy) + `,`, + `}`, + }, "") + return s +} +func (this *SecretBuildSource) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&SecretBuildSource{`, + `Secret:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Secret), 
"LocalObjectReference", "v11.LocalObjectReference", 1), `&`, ``, 1) + `,`, + `DestinationDir:` + fmt.Sprintf("%v", this.DestinationDir) + `,`, + `}`, + }, "") + return s +} +func (this *SecretLocalReference) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&SecretLocalReference{`, + `Name:` + fmt.Sprintf("%v", this.Name) + `,`, + `}`, + }, "") + return s +} +func (this *SecretSpec) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&SecretSpec{`, + `SecretSource:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.SecretSource), "LocalObjectReference", "v11.LocalObjectReference", 1), `&`, ``, 1) + `,`, + `MountPath:` + fmt.Sprintf("%v", this.MountPath) + `,`, + `}`, + }, "") + return s +} +func (this *SourceBuildStrategy) String() string { + if this == nil { + return "nil" + } + repeatedStringForEnv := "[]EnvVar{" + for _, f := range this.Env { + repeatedStringForEnv += fmt.Sprintf("%v", f) + "," + } + repeatedStringForEnv += "}" + s := strings.Join([]string{`&SourceBuildStrategy{`, + `From:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.From), "ObjectReference", "v11.ObjectReference", 1), `&`, ``, 1) + `,`, + `PullSecret:` + strings.Replace(fmt.Sprintf("%v", this.PullSecret), "LocalObjectReference", "v11.LocalObjectReference", 1) + `,`, + `Env:` + repeatedStringForEnv + `,`, + `Scripts:` + fmt.Sprintf("%v", this.Scripts) + `,`, + `Incremental:` + valueToStringGenerated(this.Incremental) + `,`, + `ForcePull:` + fmt.Sprintf("%v", this.ForcePull) + `,`, + `}`, + }, "") + return s +} +func (this *SourceControlUser) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&SourceControlUser{`, + `Name:` + fmt.Sprintf("%v", this.Name) + `,`, + `Email:` + fmt.Sprintf("%v", this.Email) + `,`, + `}`, + }, "") + return s +} +func (this *SourceRevision) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&SourceRevision{`, + `Type:` + fmt.Sprintf("%v", this.Type) + `,`, + `Git:` + strings.Replace(this.Git.String(), "GitSourceRevision", "GitSourceRevision", 1) + `,`, + `}`, + }, "") + return s +} +func (this *SourceStrategyOptions) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&SourceStrategyOptions{`, + `Incremental:` + valueToStringGenerated(this.Incremental) + `,`, + `}`, + }, "") + return s +} +func (this *StageInfo) String() string { + if this == nil { + return "nil" + } + repeatedStringForSteps := "[]StepInfo{" + for _, f := range this.Steps { + repeatedStringForSteps += strings.Replace(strings.Replace(f.String(), "StepInfo", "StepInfo", 1), `&`, ``, 1) + "," + } + repeatedStringForSteps += "}" + s := strings.Join([]string{`&StageInfo{`, + `Name:` + fmt.Sprintf("%v", this.Name) + `,`, + `StartTime:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.StartTime), "Time", "v1.Time", 1), `&`, ``, 1) + `,`, + `DurationMilliseconds:` + fmt.Sprintf("%v", this.DurationMilliseconds) + `,`, + `Steps:` + repeatedStringForSteps + `,`, + `}`, + }, "") + return s +} +func (this *StepInfo) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&StepInfo{`, + `Name:` + fmt.Sprintf("%v", this.Name) + `,`, + `StartTime:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.StartTime), "Time", "v1.Time", 1), `&`, ``, 1) + `,`, + `DurationMilliseconds:` + fmt.Sprintf("%v", this.DurationMilliseconds) + `,`, + `}`, + }, "") + return s +} +func (this *WebHookTrigger) String() string { + 
if this == nil { + return "nil" + } + s := strings.Join([]string{`&WebHookTrigger{`, + `Secret:` + fmt.Sprintf("%v", this.Secret) + `,`, + `AllowEnv:` + fmt.Sprintf("%v", this.AllowEnv) + `,`, + `SecretReference:` + strings.Replace(this.SecretReference.String(), "SecretLocalReference", "SecretLocalReference", 1) + `,`, + `}`, + }, "") + return s +} +func valueToStringGenerated(v interface{}) string { + rv := reflect.ValueOf(v) + if rv.IsNil() { + return "nil" + } + pv := reflect.Indirect(rv).Interface() + return fmt.Sprintf("*%v", pv) +} +func (m *BinaryBuildRequestOptions) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: BinaryBuildRequestOptions: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: BinaryBuildRequestOptions: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ObjectMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field AsFile", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.AsFile = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Commit", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Commit = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Message", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + 
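// NOTE (editorial sketch, not part of the vendored patch): every generated
// Unmarshal in these hunks repeats the same hand-rolled wire-format loop:
// read a varint key, split it into fieldNum (key >> 3) and wireType
// (key & 0x7), then decode the payload. For wire type 2 (length-delimited,
// used for strings and embedded messages) the payload is a varint length
// followed by that many bytes, which is exactly the stringLen/postIndex
// dance visible above. A compact standalone version of the same steps:

package main

import (
	"errors"
	"fmt"
	"io"
)

var errOverflow = errors.New("varint overflows uint64")

// readVarint mirrors the inlined shift loops in the generated code.
func readVarint(dAtA []byte, iNdEx int) (uint64, int, error) {
	var x uint64
	for shift := uint(0); ; shift += 7 {
		if shift >= 64 {
			return 0, 0, errOverflow
		}
		if iNdEx >= len(dAtA) {
			return 0, 0, io.ErrUnexpectedEOF
		}
		b := dAtA[iNdEx]
		iNdEx++
		x |= uint64(b&0x7F) << shift
		if b < 0x80 {
			break
		}
	}
	return x, iNdEx, nil
}

func main() {
	// Field 2, wire type 2 (key = 2<<3 | 2 = 0x12), length 5, then "hello".
	dAtA := []byte{0x12, 0x05, 'h', 'e', 'l', 'l', 'o'}
	wire, iNdEx, _ := readVarint(dAtA, 0)
	fieldNum, wireType := int32(wire>>3), int(wire&0x7)
	strLen, iNdEx, _ := readVarint(dAtA, iNdEx)
	postIndex := iNdEx + int(strLen)
	if postIndex > len(dAtA) {
		panic(io.ErrUnexpectedEOF)
	}
	fmt.Println(fieldNum, wireType, string(dAtA[iNdEx:postIndex])) // 2 2 hello
}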
} + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Message = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field AuthorName", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.AuthorName = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 6: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field AuthorEmail", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.AuthorEmail = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 7: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field CommitterName", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.CommitterName = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 8: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field CommitterEmail", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.CommitterEmail = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return 
io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *BinaryBuildSource) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: BinaryBuildSource: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: BinaryBuildSource: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field AsFile", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.AsFile = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *BitbucketWebHookCause) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: BitbucketWebHookCause: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: BitbucketWebHookCause: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field CommonWebHookCause", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.CommonWebHookCause.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) 
> l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *Build) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Build: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Build: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ObjectMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Spec", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Spec.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Status", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Status.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *BuildCondition) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) 
<< shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: BuildCondition: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: BuildCondition: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Type", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Type = BuildConditionType(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Status", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Status = k8s_io_api_core_v1.ConditionStatus(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field LastTransitionTime", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.LastTransitionTime.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Reason", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Reason = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Message", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := 
int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Message = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 6: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field LastUpdateTime", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.LastUpdateTime.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *BuildConfig) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: BuildConfig: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: BuildConfig: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ObjectMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Spec", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Spec.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Status", wireType) + } + var msglen int + for shift := 
uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Status.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *BuildConfigList) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: BuildConfigList: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: BuildConfigList: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ListMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ListMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Items = append(m.Items, BuildConfig{}) + if err := m.Items[len(m.Items)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *BuildConfigSpec) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + 
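// NOTE (editorial sketch, not part of the vendored patch): the cases for
// fields 4 and 5 of BuildConfigSpec that follow show the generated pattern
// for optional integers: a wire-type-0 varint is accumulated into a local
// int32 and its address is stored, which is how *int32 fields such as
// SuccessfulBuildsHistoryLimit distinguish "unset" (nil) from an explicit
// zero. The same step in isolation:

package main

import "fmt"

// decodeInt32Field mirrors the generated `v |= int32(b&0x7F) << shift` loop.
func decodeInt32Field(dAtA []byte) *int32 {
	var v int32
	iNdEx := 0
	for shift := uint(0); ; shift += 7 {
		if iNdEx >= len(dAtA) {
			return nil // truncated input; the generated code returns io.ErrUnexpectedEOF
		}
		b := dAtA[iNdEx]
		iNdEx++
		v |= int32(b&0x7F) << shift
		if b < 0x80 {
			break
		}
	}
	return &v
}

func main() {
	fmt.Println(*decodeInt32Field([]byte{0x00}))       // 0 (explicitly set, pointer non-nil)
	fmt.Println(*decodeInt32Field([]byte{0xAC, 0x02})) // 300
}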
+		for shift := uint(0); ; shift += 7 {
+			if shift >= 64 {
+				return ErrIntOverflowGenerated
+			}
+			if iNdEx >= l {
+				return io.ErrUnexpectedEOF
+			}
+			b := dAtA[iNdEx]
+			iNdEx++
+			wire |= uint64(b&0x7F) << shift
+			if b < 0x80 {
+				break
+			}
+		}
+		fieldNum := int32(wire >> 3)
+		wireType := int(wire & 0x7)
+		if wireType == 4 {
+			return fmt.Errorf("proto: BuildConfigSpec: wiretype end group for non-group")
+		}
+		if fieldNum <= 0 {
+			return fmt.Errorf("proto: BuildConfigSpec: illegal tag %d (wire type %d)", fieldNum, wire)
+		}
+		switch fieldNum {
+		case 1:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Triggers", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.Triggers = append(m.Triggers, BuildTriggerPolicy{})
+			if err := m.Triggers[len(m.Triggers)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			iNdEx = postIndex
+		case 2:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field RunPolicy", wireType)
+			}
+			var stringLen uint64
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				stringLen |= uint64(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			intStringLen := int(stringLen)
+			if intStringLen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + intStringLen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.RunPolicy = BuildRunPolicy(dAtA[iNdEx:postIndex])
+			iNdEx = postIndex
+		case 3:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field CommonSpec", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			if err := m.CommonSpec.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			iNdEx = postIndex
+		case 4:
+			if wireType != 0 {
+				return fmt.Errorf("proto: wrong wireType = %d for field SuccessfulBuildsHistoryLimit", wireType)
+			}
+			var v int32
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				v |= int32(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			m.SuccessfulBuildsHistoryLimit = &v
+		case 5:
+			if wireType != 0 {
+				return fmt.Errorf("proto: wrong wireType = %d for field FailedBuildsHistoryLimit", wireType)
+			}
+			var v int32
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				v |= int32(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			m.FailedBuildsHistoryLimit = &v
+		default:
+			iNdEx = preIndex
+			skippy, err := skipGenerated(dAtA[iNdEx:])
+			if err != nil {
+				return err
+			}
+			if skippy < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if (iNdEx + skippy) < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if (iNdEx + skippy) > l {
+				return io.ErrUnexpectedEOF
+			}
+			iNdEx += skippy
+		}
+	}
+
+	if iNdEx > l {
+		return io.ErrUnexpectedEOF
+	}
+	return nil
+}
+func (m *BuildConfigStatus) Unmarshal(dAtA []byte) error {
+	l := len(dAtA)
+	iNdEx := 0
+	for iNdEx < l {
+		preIndex := iNdEx
+		var wire uint64
+		for shift := uint(0); ; shift += 7 {
+			if shift >= 64 {
+				return ErrIntOverflowGenerated
+			}
+			if iNdEx >= l {
+				return io.ErrUnexpectedEOF
+			}
+			b := dAtA[iNdEx]
+			iNdEx++
+			wire |= uint64(b&0x7F) << shift
+			if b < 0x80 {
+				break
+			}
+		}
+		fieldNum := int32(wire >> 3)
+		wireType := int(wire & 0x7)
+		if wireType == 4 {
+			return fmt.Errorf("proto: BuildConfigStatus: wiretype end group for non-group")
+		}
+		if fieldNum <= 0 {
+			return fmt.Errorf("proto: BuildConfigStatus: illegal tag %d (wire type %d)", fieldNum, wire)
+		}
+		switch fieldNum {
+		case 1:
+			if wireType != 0 {
+				return fmt.Errorf("proto: wrong wireType = %d for field LastVersion", wireType)
+			}
+			m.LastVersion = 0
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				m.LastVersion |= int64(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+		default:
+			iNdEx = preIndex
+			skippy, err := skipGenerated(dAtA[iNdEx:])
+			if err != nil {
+				return err
+			}
+			if skippy < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if (iNdEx + skippy) < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if (iNdEx + skippy) > l {
+				return io.ErrUnexpectedEOF
+			}
+			iNdEx += skippy
+		}
+	}
+
+	if iNdEx > l {
+		return io.ErrUnexpectedEOF
+	}
+	return nil
+}
+func (m *BuildList) Unmarshal(dAtA []byte) error {
+	l := len(dAtA)
+	iNdEx := 0
+	for iNdEx < l {
+		preIndex := iNdEx
+		var wire uint64
+		for shift := uint(0); ; shift += 7 {
+			if shift >= 64 {
+				return ErrIntOverflowGenerated
+			}
+			if iNdEx >= l {
+				return io.ErrUnexpectedEOF
+			}
+			b := dAtA[iNdEx]
+			iNdEx++
+			wire |= uint64(b&0x7F) << shift
+			if b < 0x80 {
+				break
+			}
+		}
+		fieldNum := int32(wire >> 3)
+		wireType := int(wire & 0x7)
+		if wireType == 4 {
+			return fmt.Errorf("proto: BuildList: wiretype end group for non-group")
+		}
+		if fieldNum <= 0 {
+			return fmt.Errorf("proto: BuildList: illegal tag %d (wire type %d)", fieldNum, wire)
+		}
+		switch fieldNum {
+		case 1:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field ListMeta", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			if err := m.ListMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			iNdEx = postIndex
+		case 2:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.Items = append(m.Items, Build{})
+			if err := m.Items[len(m.Items)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			iNdEx = postIndex
+		default:
+			iNdEx = preIndex
+			skippy, err := skipGenerated(dAtA[iNdEx:])
+			if err != nil {
+				return err
+			}
+			if skippy < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if (iNdEx + skippy) < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if (iNdEx + skippy) > l {
+				return io.ErrUnexpectedEOF
+			}
+			iNdEx += skippy
+		}
+	}
+
+	if iNdEx > l {
+		return io.ErrUnexpectedEOF
+	}
+	return nil
+}
+func (m *BuildLog) Unmarshal(dAtA []byte) error {
+	l := len(dAtA)
+	iNdEx := 0
+	for iNdEx < l {
+		preIndex := iNdEx
+		var wire uint64
+		for shift := uint(0); ; shift += 7 {
+			if shift >= 64 {
+				return ErrIntOverflowGenerated
+			}
+			if iNdEx >= l {
+				return io.ErrUnexpectedEOF
+			}
+			b := dAtA[iNdEx]
+			iNdEx++
+			wire |= uint64(b&0x7F) << shift
+			if b < 0x80 {
+				break
+			}
+		}
+		fieldNum := int32(wire >> 3)
+		wireType := int(wire & 0x7)
+		if wireType == 4 {
+			return fmt.Errorf("proto: BuildLog: wiretype end group for non-group")
+		}
+		if fieldNum <= 0 {
+			return fmt.Errorf("proto: BuildLog: illegal tag %d (wire type %d)", fieldNum, wire)
+		}
+		switch fieldNum {
+		default:
+			iNdEx = preIndex
+			skippy, err := skipGenerated(dAtA[iNdEx:])
+			if err != nil {
+				return err
+			}
+			if skippy < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if (iNdEx + skippy) < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if (iNdEx + skippy) > l {
+				return io.ErrUnexpectedEOF
+			}
+			iNdEx += skippy
+		}
+	}
+
+	if iNdEx > l {
+		return io.ErrUnexpectedEOF
+	}
+	return nil
+}
+func (m *BuildLogOptions) Unmarshal(dAtA []byte) error {
+	l := len(dAtA)
+	iNdEx := 0
+	for iNdEx < l {
+		preIndex := iNdEx
+		var wire uint64
+		for shift := uint(0); ; shift += 7 {
+			if shift >= 64 {
+				return ErrIntOverflowGenerated
+			}
+			if iNdEx >= l {
+				return io.ErrUnexpectedEOF
+			}
+			b := dAtA[iNdEx]
+			iNdEx++
+			wire |= uint64(b&0x7F) << shift
+			if b < 0x80 {
+				break
+			}
+		}
+		fieldNum := int32(wire >> 3)
+		wireType := int(wire & 0x7)
+		if wireType == 4 {
+			return fmt.Errorf("proto: BuildLogOptions: wiretype end group for non-group")
+		}
+		if fieldNum <= 0 {
+			return fmt.Errorf("proto: BuildLogOptions: illegal tag %d (wire type %d)", fieldNum, wire)
+		}
+		switch fieldNum {
+		case 1:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Container", wireType)
+			}
+			var stringLen uint64
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				stringLen |= uint64(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			intStringLen := int(stringLen)
+			if intStringLen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + intStringLen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.Container = string(dAtA[iNdEx:postIndex])
+			iNdEx = postIndex
+		case 2:
+			if wireType != 0 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Follow", wireType)
+			}
+			var v int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				v |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			m.Follow = bool(v != 0)
+		case 3:
+			if wireType != 0 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Previous", wireType)
+			}
+			var v int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				v |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			m.Previous = bool(v != 0)
+		case 4:
+			if wireType != 0 {
+				return fmt.Errorf("proto: wrong wireType = %d for field SinceSeconds", wireType)
+			}
+			var v int64
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				v |= int64(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			m.SinceSeconds = &v
+		case 5:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field SinceTime", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			if m.SinceTime == nil {
+				m.SinceTime = &v1.Time{}
+			}
+			if err := m.SinceTime.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			iNdEx = postIndex
+		case 6:
+			if wireType != 0 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Timestamps", wireType)
+			}
+			var v int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				v |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			m.Timestamps = bool(v != 0)
+		case 7:
+			if wireType != 0 {
+				return fmt.Errorf("proto: wrong wireType = %d for field TailLines", wireType)
+			}
+			var v int64
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				v |= int64(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			m.TailLines = &v
+		case 8:
+			if wireType != 0 {
+				return fmt.Errorf("proto: wrong wireType = %d for field LimitBytes", wireType)
+			}
+			var v int64
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				v |= int64(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			m.LimitBytes = &v
+		case 9:
+			if wireType != 0 {
+				return fmt.Errorf("proto: wrong wireType = %d for field NoWait", wireType)
+			}
+			var v int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				v |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			m.NoWait = bool(v != 0)
+		case 10:
+			if wireType != 0 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Version", wireType)
+			}
+			var v int64
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				v |= int64(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			m.Version = &v
+		default:
+			iNdEx = preIndex
+			skippy, err := skipGenerated(dAtA[iNdEx:])
+			if err != nil {
+				return err
+			}
+			if skippy < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if (iNdEx + skippy) < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if (iNdEx + skippy) > l {
+				return io.ErrUnexpectedEOF
+			}
+			iNdEx += skippy
+		}
+	}
+
+	if iNdEx > l {
+		return io.ErrUnexpectedEOF
+	}
+	return nil
+}
+func (m *BuildOutput) Unmarshal(dAtA []byte) error {
+	l := len(dAtA)
+	iNdEx := 0
+	for iNdEx < l {
+		preIndex := iNdEx
+		var wire uint64
+		for shift := uint(0); ; shift += 7 {
+			if shift >= 64 {
+				return ErrIntOverflowGenerated
+			}
+			if iNdEx >= l {
+				return io.ErrUnexpectedEOF
+			}
+			b := dAtA[iNdEx]
+			iNdEx++
+			wire |= uint64(b&0x7F) << shift
+			if b < 0x80 {
+				break
+			}
+		}
+		fieldNum := int32(wire >> 3)
+		wireType := int(wire & 0x7)
+		if wireType == 4 {
+			return fmt.Errorf("proto: BuildOutput: wiretype end group for non-group")
+		}
+		if fieldNum <= 0 {
+			return fmt.Errorf("proto: BuildOutput: illegal tag %d (wire type %d)", fieldNum, wire)
+		}
+		switch fieldNum {
+		case 1:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field To", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			if m.To == nil {
+				m.To = &v11.ObjectReference{}
+			}
+			if err := m.To.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			iNdEx = postIndex
+		case 2:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field PushSecret", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			if m.PushSecret == nil {
+				m.PushSecret = &v11.LocalObjectReference{}
+			}
+			if err := m.PushSecret.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			iNdEx = postIndex
+		case 3:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field ImageLabels", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.ImageLabels = append(m.ImageLabels, ImageLabel{})
+			if err := m.ImageLabels[len(m.ImageLabels)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			iNdEx = postIndex
+		default:
+			iNdEx = preIndex
+			skippy, err := skipGenerated(dAtA[iNdEx:])
+			if err != nil {
+				return err
+			}
+			if skippy < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if (iNdEx + skippy) < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if (iNdEx + skippy) > l {
+				return io.ErrUnexpectedEOF
+			}
+			iNdEx += skippy
+		}
+	}
+
+	if iNdEx > l {
+		return io.ErrUnexpectedEOF
+	}
+	return nil
+}
+func (m *BuildPostCommitSpec) Unmarshal(dAtA []byte) error {
+	l := len(dAtA)
+	iNdEx := 0
+	for iNdEx < l {
+		preIndex := iNdEx
+		var wire uint64
+		for shift := uint(0); ; shift += 7 {
+			if shift >= 64 {
+				return ErrIntOverflowGenerated
+			}
+			if iNdEx >= l {
+				return io.ErrUnexpectedEOF
+			}
+			b := dAtA[iNdEx]
+			iNdEx++
+			wire |= uint64(b&0x7F) << shift
+			if b < 0x80 {
+				break
+			}
+		}
+		fieldNum := int32(wire >> 3)
+		wireType := int(wire & 0x7)
+		if wireType == 4 {
+			return fmt.Errorf("proto: BuildPostCommitSpec: wiretype end group for non-group")
+		}
+		if fieldNum <= 0 {
+			return fmt.Errorf("proto: BuildPostCommitSpec: illegal tag %d (wire type %d)", fieldNum, wire)
+		}
+		switch fieldNum {
+		case 1:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Command", wireType)
+			}
+			var stringLen uint64
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				stringLen |= uint64(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			intStringLen := int(stringLen)
+			if intStringLen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + intStringLen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.Command = append(m.Command, string(dAtA[iNdEx:postIndex]))
+			iNdEx = postIndex
+		case 2:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Args", wireType)
+			}
+			var stringLen uint64
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				stringLen |= uint64(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			intStringLen := int(stringLen)
+			if intStringLen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + intStringLen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.Args = append(m.Args, string(dAtA[iNdEx:postIndex]))
+			iNdEx = postIndex
+		case 3:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Script", wireType)
+			}
+			var stringLen uint64
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				stringLen |= uint64(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			intStringLen := int(stringLen)
+			if intStringLen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + intStringLen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.Script = string(dAtA[iNdEx:postIndex])
+			iNdEx = postIndex
+		default:
+			iNdEx = preIndex
+			skippy, err := skipGenerated(dAtA[iNdEx:])
+			if err != nil {
+				return err
+			}
+			if skippy < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if (iNdEx + skippy) < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if (iNdEx + skippy) > l {
+				return io.ErrUnexpectedEOF
+			}
+			iNdEx += skippy
+		}
+	}
+
+	if iNdEx > l {
+		return io.ErrUnexpectedEOF
+	}
+	return nil
+}
+func (m *BuildRequest) Unmarshal(dAtA []byte) error {
+	l := len(dAtA)
+	iNdEx := 0
+	for iNdEx < l {
+		preIndex := iNdEx
+		var wire uint64
+		for shift := uint(0); ; shift += 7 {
+			if shift >= 64 {
+				return ErrIntOverflowGenerated
+			}
+			if iNdEx >= l {
+				return io.ErrUnexpectedEOF
+			}
+			b := dAtA[iNdEx]
+			iNdEx++
+			wire |= uint64(b&0x7F) << shift
+			if b < 0x80 {
+				break
+			}
+		}
+		fieldNum := int32(wire >> 3)
+		wireType := int(wire & 0x7)
+		if wireType == 4 {
+			return fmt.Errorf("proto: BuildRequest: wiretype end group for non-group")
+		}
+		if fieldNum <= 0 {
+			return fmt.Errorf("proto: BuildRequest: illegal tag %d (wire type %d)", fieldNum, wire)
+		}
+		switch fieldNum {
+		case 1:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			if err := m.ObjectMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			iNdEx = postIndex
+		case 2:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Revision", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			if m.Revision == nil {
+				m.Revision = &SourceRevision{}
+			}
+			if err := m.Revision.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			iNdEx = postIndex
+		case 3:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field TriggeredByImage", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			if m.TriggeredByImage == nil {
+				m.TriggeredByImage = &v11.ObjectReference{}
+			}
+			if err := m.TriggeredByImage.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			iNdEx = postIndex
+		case 4:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field From", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			if m.From == nil {
+				m.From = &v11.ObjectReference{}
+			}
+			if err := m.From.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			iNdEx = postIndex
+		case 5:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Binary", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			if m.Binary == nil {
+				m.Binary = &BinaryBuildSource{}
+			}
+			if err := m.Binary.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			iNdEx = postIndex
+		case 6:
+			if wireType != 0 {
+				return fmt.Errorf("proto: wrong wireType = %d for field LastVersion", wireType)
+			}
+			var v int64
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				v |= int64(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			m.LastVersion = &v
+		case 7:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Env", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.Env = append(m.Env, v11.EnvVar{})
+			if err := m.Env[len(m.Env)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			iNdEx = postIndex
+		case 8:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field TriggeredBy", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.TriggeredBy = append(m.TriggeredBy, BuildTriggerCause{})
+			if err := m.TriggeredBy[len(m.TriggeredBy)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			iNdEx = postIndex
+		case 9:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field DockerStrategyOptions", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			if m.DockerStrategyOptions == nil {
+				m.DockerStrategyOptions = &DockerStrategyOptions{}
+			}
+			if err := m.DockerStrategyOptions.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			iNdEx = postIndex
+		case 10:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field SourceStrategyOptions", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			if m.SourceStrategyOptions == nil {
+				m.SourceStrategyOptions = &SourceStrategyOptions{}
+			}
+			if err := m.SourceStrategyOptions.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			iNdEx = postIndex
+		default:
+			iNdEx = preIndex
+			skippy, err := skipGenerated(dAtA[iNdEx:])
+			if err != nil {
+				return err
+			}
+			if skippy < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if (iNdEx + skippy) < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if (iNdEx + skippy) > l {
+				return io.ErrUnexpectedEOF
+			}
+			iNdEx += skippy
+		}
+	}
+
+	if iNdEx > l {
+		return io.ErrUnexpectedEOF
+	}
+	return nil
+}
+func (m *BuildSource) Unmarshal(dAtA []byte) error {
+	l := len(dAtA)
+	iNdEx := 0
+	for iNdEx < l {
+		preIndex := iNdEx
+		var wire uint64
+		for shift := uint(0); ; shift += 7 {
+			if shift >= 64 {
+				return ErrIntOverflowGenerated
+			}
+			if iNdEx >= l {
+				return io.ErrUnexpectedEOF
+			}
+			b := dAtA[iNdEx]
+			iNdEx++
+			wire |= uint64(b&0x7F) << shift
+			if b < 0x80 {
+				break
+			}
+		}
+		fieldNum := int32(wire >> 3)
+		wireType := int(wire & 0x7)
+		if wireType == 4 {
+			return fmt.Errorf("proto: BuildSource: wiretype end group for non-group")
+		}
+		if fieldNum <= 0 {
+			return fmt.Errorf("proto: BuildSource: illegal tag %d (wire type %d)", fieldNum, wire)
+		}
+		switch fieldNum {
+		case 1:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Type", wireType)
+			}
+			var stringLen uint64
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				stringLen |= uint64(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			intStringLen := int(stringLen)
+			if intStringLen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + intStringLen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.Type = BuildSourceType(dAtA[iNdEx:postIndex])
+			iNdEx = postIndex
+		case 2:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Binary", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			if m.Binary == nil {
+				m.Binary = &BinaryBuildSource{}
+			}
+			if err := m.Binary.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			iNdEx = postIndex
+		case 3:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Dockerfile", wireType)
+			}
+			var stringLen uint64
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				stringLen |= uint64(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			intStringLen := int(stringLen)
+			if intStringLen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + intStringLen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			s := string(dAtA[iNdEx:postIndex])
+			m.Dockerfile = &s
+			iNdEx = postIndex
+		case 4:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Git", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			if m.Git == nil {
+				m.Git = &GitBuildSource{}
+			}
+			if err := m.Git.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			iNdEx = postIndex
+		case 5:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Images", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.Images = append(m.Images, ImageSource{})
+			if err := m.Images[len(m.Images)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			iNdEx = postIndex
+		case 6:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field ContextDir", wireType)
+			}
+			var stringLen uint64
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				stringLen |= uint64(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			intStringLen := int(stringLen)
+			if intStringLen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + intStringLen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.ContextDir = string(dAtA[iNdEx:postIndex])
+			iNdEx = postIndex
+		case 7:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field SourceSecret", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			if m.SourceSecret == nil {
+				m.SourceSecret = &v11.LocalObjectReference{}
+			}
+			if err := m.SourceSecret.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			iNdEx = postIndex
+		case 8:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Secrets", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.Secrets = append(m.Secrets, SecretBuildSource{})
+			if err := m.Secrets[len(m.Secrets)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			iNdEx = postIndex
+		case 9:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field ConfigMaps", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.ConfigMaps = append(m.ConfigMaps, ConfigMapBuildSource{})
+			if err := m.ConfigMaps[len(m.ConfigMaps)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			iNdEx = postIndex
+		default:
+			iNdEx = preIndex
+			skippy, err := skipGenerated(dAtA[iNdEx:])
+			if err != nil {
+				return err
+			}
+			if skippy < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if (iNdEx + skippy) < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if (iNdEx + skippy) > l {
+				return io.ErrUnexpectedEOF
+			}
+			iNdEx += skippy
+		}
+	}
+
+	if iNdEx > l {
+		return io.ErrUnexpectedEOF
+	}
+	return nil
+}
+func (m *BuildSpec) Unmarshal(dAtA []byte) error {
+	l := len(dAtA)
+	iNdEx := 0
+	for iNdEx < l {
+		preIndex := iNdEx
+		var wire uint64
+		for shift := uint(0); ; shift += 7 {
+			if shift >= 64 {
+				return ErrIntOverflowGenerated
+			}
+			if iNdEx >= l {
+				return io.ErrUnexpectedEOF
+			}
+			b := dAtA[iNdEx]
+			iNdEx++
+			wire |= uint64(b&0x7F) << shift
+			if b < 0x80 {
+				break
+			}
+		}
+		fieldNum := int32(wire >> 3)
+		wireType := int(wire & 0x7)
+		if wireType == 4 {
+			return fmt.Errorf("proto: BuildSpec: wiretype end group for non-group")
+		}
+		if fieldNum <= 0 {
+			return fmt.Errorf("proto: BuildSpec: illegal tag %d (wire type %d)", fieldNum, wire)
+		}
+		switch fieldNum {
+		case 1:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field CommonSpec", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			if err := m.CommonSpec.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			iNdEx = postIndex
+		case 2:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field TriggeredBy", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.TriggeredBy = append(m.TriggeredBy, BuildTriggerCause{})
+			if err := m.TriggeredBy[len(m.TriggeredBy)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			iNdEx = postIndex
+		default:
+			iNdEx = preIndex
+			skippy, err := skipGenerated(dAtA[iNdEx:])
+			if err != nil {
+				return err
+			}
+			if skippy < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if (iNdEx + skippy) < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if (iNdEx + skippy) > l {
+				return io.ErrUnexpectedEOF
+			}
+			iNdEx += skippy
+		}
+	}
+
+	if iNdEx > l {
+		return io.ErrUnexpectedEOF
+	}
+	return nil
+}
+func (m *BuildStatus) Unmarshal(dAtA []byte) error {
+	l := len(dAtA)
+	iNdEx := 0
+	for iNdEx < l {
+		preIndex := iNdEx
+		var wire uint64
+		for shift := uint(0); ; shift += 7 {
+			if shift >= 64 {
+				return ErrIntOverflowGenerated
+			}
+			if iNdEx >= l {
+				return io.ErrUnexpectedEOF
+			}
+			b := dAtA[iNdEx]
+			iNdEx++
+			wire |= uint64(b&0x7F) << shift
+			if b < 0x80 {
+				break
+			}
+		}
+		fieldNum := int32(wire >> 3)
+		wireType := int(wire & 0x7)
+		if wireType == 4 {
+			return fmt.Errorf("proto: BuildStatus: wiretype end group for non-group")
+		}
+		if fieldNum <= 0 {
+			return fmt.Errorf("proto: BuildStatus: illegal tag %d (wire type %d)", fieldNum, wire)
+		}
+		switch fieldNum {
+		case 1:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Phase", wireType)
+			}
+			var stringLen uint64
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				stringLen |= uint64(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			intStringLen := int(stringLen)
+			if intStringLen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + intStringLen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.Phase = BuildPhase(dAtA[iNdEx:postIndex])
+			iNdEx = postIndex
+		case 2:
+			if wireType != 0 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Cancelled", wireType)
+			}
+			var v int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				v |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			m.Cancelled = bool(v != 0)
+		case 3:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Reason", wireType)
+			}
+			var stringLen uint64
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				stringLen |= uint64(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			intStringLen := int(stringLen)
+			if intStringLen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + intStringLen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.Reason = StatusReason(dAtA[iNdEx:postIndex])
+			iNdEx = postIndex
+		case 4:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Message", wireType)
+			}
+			var stringLen uint64
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				stringLen |= uint64(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			intStringLen := int(stringLen)
+			if intStringLen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + intStringLen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.Message = string(dAtA[iNdEx:postIndex])
+			iNdEx = postIndex
+		case 5:
+			if wireType != 2 {
StartTimestamp", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.StartTimestamp == nil { + m.StartTimestamp = &v1.Time{} + } + if err := m.StartTimestamp.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 6: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field CompletionTimestamp", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.CompletionTimestamp == nil { + m.CompletionTimestamp = &v1.Time{} + } + if err := m.CompletionTimestamp.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 7: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Duration", wireType) + } + m.Duration = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Duration |= time.Duration(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 8: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field OutputDockerImageReference", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.OutputDockerImageReference = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 9: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Config", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Config == nil { + m.Config = &v11.ObjectReference{} + } + if err := m.Config.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 10: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Output", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return 
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			if m.Config == nil {
+				m.Config = &v11.ObjectReference{}
+			}
+			if err := m.Config.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			iNdEx = postIndex
+		case 10:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Output", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			if err := m.Output.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			iNdEx = postIndex
+		case 11:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Stages", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.Stages = append(m.Stages, StageInfo{})
+			if err := m.Stages[len(m.Stages)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			iNdEx = postIndex
+		case 12:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field LogSnippet", wireType)
+			}
+			var stringLen uint64
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				stringLen |= uint64(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			intStringLen := int(stringLen)
+			if intStringLen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + intStringLen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.LogSnippet = string(dAtA[iNdEx:postIndex])
+			iNdEx = postIndex
+		case 13:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Conditions", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.Conditions = append(m.Conditions, BuildCondition{})
+			if err := m.Conditions[len(m.Conditions)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			iNdEx = postIndex
+		default:
+			iNdEx = preIndex
+			skippy, err := skipGenerated(dAtA[iNdEx:])
+			if err != nil {
+				return err
+			}
+			if skippy < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if (iNdEx + skippy) < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if (iNdEx + skippy) > l {
+				return io.ErrUnexpectedEOF
+			}
+			iNdEx += skippy
+		}
+	}
+
+	if iNdEx > l {
+		return io.ErrUnexpectedEOF
+	}
+	return nil
+}
+func (m *BuildStatusOutput) Unmarshal(dAtA []byte) error {
+	l := len(dAtA)
+	iNdEx := 0
+	for iNdEx < l {
+		preIndex := iNdEx
+		var wire uint64
+		for shift := uint(0); ; shift += 7 {
+			if shift >= 64 {
+				return ErrIntOverflowGenerated
+			}
+			if iNdEx >= l {
+				return io.ErrUnexpectedEOF
+			}
+			b := dAtA[iNdEx]
+			iNdEx++
+			wire |= uint64(b&0x7F) << shift
+			if b < 0x80 {
+				break
+			}
+		}
+		fieldNum := int32(wire >> 3)
+		wireType := int(wire & 0x7)
+		if wireType == 4 {
non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: BuildStatusOutput: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field To", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.To == nil { + m.To = &BuildStatusOutputTo{} + } + if err := m.To.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *BuildStatusOutputTo) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: BuildStatusOutputTo: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: BuildStatusOutputTo: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ImageDigest", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ImageDigest = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *BuildStrategy) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: 
+			return fmt.Errorf("proto: BuildStrategy: wiretype end group for non-group")
+		}
+		if fieldNum <= 0 {
+			return fmt.Errorf("proto: BuildStrategy: illegal tag %d (wire type %d)", fieldNum, wire)
+		}
+		switch fieldNum {
+		case 1:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Type", wireType)
+			}
+			var stringLen uint64
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				stringLen |= uint64(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			intStringLen := int(stringLen)
+			if intStringLen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + intStringLen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.Type = BuildStrategyType(dAtA[iNdEx:postIndex])
+			iNdEx = postIndex
+		case 2:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field DockerStrategy", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			if m.DockerStrategy == nil {
+				m.DockerStrategy = &DockerBuildStrategy{}
+			}
+			if err := m.DockerStrategy.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			iNdEx = postIndex
+		case 3:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field SourceStrategy", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			if m.SourceStrategy == nil {
+				m.SourceStrategy = &SourceBuildStrategy{}
+			}
+			if err := m.SourceStrategy.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			iNdEx = postIndex
+		case 4:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field CustomStrategy", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			if m.CustomStrategy == nil {
+				m.CustomStrategy = &CustomBuildStrategy{}
+			}
+			if err := m.CustomStrategy.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			iNdEx = postIndex
+		case 5:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field JenkinsPipelineStrategy", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			if m.JenkinsPipelineStrategy == nil {
+				m.JenkinsPipelineStrategy = &JenkinsPipelineBuildStrategy{}
+			}
+			if err := m.JenkinsPipelineStrategy.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			iNdEx = postIndex
+		default:
+			iNdEx = preIndex
+			skippy, err := skipGenerated(dAtA[iNdEx:])
+			if err != nil {
+				return err
+			}
+			if skippy < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if (iNdEx + skippy) < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if (iNdEx + skippy) > l {
+				return io.ErrUnexpectedEOF
+			}
+			iNdEx += skippy
+		}
+	}
+
+	if iNdEx > l {
+		return io.ErrUnexpectedEOF
+	}
+	return nil
+}
+func (m *BuildTriggerCause) Unmarshal(dAtA []byte) error {
+	l := len(dAtA)
+	iNdEx := 0
+	for iNdEx < l {
+		preIndex := iNdEx
+		var wire uint64
+		for shift := uint(0); ; shift += 7 {
+			if shift >= 64 {
+				return ErrIntOverflowGenerated
+			}
+			if iNdEx >= l {
+				return io.ErrUnexpectedEOF
+			}
+			b := dAtA[iNdEx]
+			iNdEx++
+			wire |= uint64(b&0x7F) << shift
+			if b < 0x80 {
+				break
+			}
+		}
+		fieldNum := int32(wire >> 3)
+		wireType := int(wire & 0x7)
+		if wireType == 4 {
+			return fmt.Errorf("proto: BuildTriggerCause: wiretype end group for non-group")
+		}
+		if fieldNum <= 0 {
+			return fmt.Errorf("proto: BuildTriggerCause: illegal tag %d (wire type %d)", fieldNum, wire)
+		}
+		switch fieldNum {
+		case 1:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Message", wireType)
+			}
+			var stringLen uint64
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				stringLen |= uint64(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			intStringLen := int(stringLen)
+			if intStringLen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + intStringLen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.Message = string(dAtA[iNdEx:postIndex])
+			iNdEx = postIndex
+		case 2:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field GenericWebHook", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			if m.GenericWebHook == nil {
+				m.GenericWebHook = &GenericWebHookCause{}
+			}
+			if err := m.GenericWebHook.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			iNdEx = postIndex
+		case 3:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field GitHubWebHook", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			if m.GitHubWebHook == nil {
+				m.GitHubWebHook = &GitHubWebHookCause{}
+			}
+			if err := m.GitHubWebHook.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			iNdEx = postIndex
+		case 4:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field ImageChangeBuild", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			if m.ImageChangeBuild == nil {
+				m.ImageChangeBuild = &ImageChangeCause{}
+			}
+			if err := m.ImageChangeBuild.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			iNdEx = postIndex
+		case 5:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field GitLabWebHook", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			if m.GitLabWebHook == nil {
+				m.GitLabWebHook = &GitLabWebHookCause{}
+			}
+			if err := m.GitLabWebHook.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			iNdEx = postIndex
+		case 6:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field BitbucketWebHook", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			if m.BitbucketWebHook == nil {
+				m.BitbucketWebHook = &BitbucketWebHookCause{}
+			}
+			if err := m.BitbucketWebHook.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			iNdEx = postIndex
+		default:
+			iNdEx = preIndex
+			skippy, err := skipGenerated(dAtA[iNdEx:])
+			if err != nil {
+				return err
+			}
+			if skippy < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if (iNdEx + skippy) < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if (iNdEx + skippy) > l {
+				return io.ErrUnexpectedEOF
+			}
+			iNdEx += skippy
+		}
+	}
+
+	if iNdEx > l {
+		return io.ErrUnexpectedEOF
+	}
+	return nil
+}
+func (m *BuildTriggerPolicy) Unmarshal(dAtA []byte) error {
+	l := len(dAtA)
+	iNdEx := 0
+	for iNdEx < l {
+		preIndex := iNdEx
+		var wire uint64
+		for shift := uint(0); ; shift += 7 {
+			if shift >= 64 {
+				return ErrIntOverflowGenerated
+			}
+			if iNdEx >= l {
+				return io.ErrUnexpectedEOF
+			}
+			b := dAtA[iNdEx]
+			iNdEx++
+			wire |= uint64(b&0x7F) << shift
+			if b < 0x80 {
+				break
+			}
+		}
+		fieldNum := int32(wire >> 3)
+		wireType := int(wire & 0x7)
+		if wireType == 4 {
+			return fmt.Errorf("proto: BuildTriggerPolicy: wiretype end group for non-group")
+		}
+		if fieldNum <= 0 {
+			return fmt.Errorf("proto: BuildTriggerPolicy: illegal tag %d (wire type %d)", fieldNum, wire)
+		}
+		switch fieldNum {
+		case 1:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Type", wireType)
+			}
+			var stringLen uint64
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				stringLen |= uint64(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			intStringLen := int(stringLen)
+			if intStringLen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + intStringLen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.Type = BuildTriggerType(dAtA[iNdEx:postIndex])
+			iNdEx = postIndex
+		case 2:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field GitHubWebHook", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			if m.GitHubWebHook == nil {
+				m.GitHubWebHook = &WebHookTrigger{}
+			}
+			if err := m.GitHubWebHook.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			iNdEx = postIndex
+		case 3:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field GenericWebHook", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			if m.GenericWebHook == nil {
+				m.GenericWebHook = &WebHookTrigger{}
+			}
+			if err := m.GenericWebHook.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			iNdEx = postIndex
+		case 4:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field ImageChange", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			if m.ImageChange == nil {
+				m.ImageChange = &ImageChangeTrigger{}
+			}
+			if err := m.ImageChange.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			iNdEx = postIndex
+		case 5:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field GitLabWebHook", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			if m.GitLabWebHook == nil {
+				m.GitLabWebHook = &WebHookTrigger{}
+			}
+			if err := m.GitLabWebHook.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			iNdEx = postIndex
+		case 6:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field BitbucketWebHook", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			if m.BitbucketWebHook == nil {
+				m.BitbucketWebHook = &WebHookTrigger{}
+			}
+			if err := m.BitbucketWebHook.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			iNdEx = postIndex
+		default:
+			iNdEx = preIndex
+			skippy, err := skipGenerated(dAtA[iNdEx:])
+			if err != nil {
+				return err
+			}
+			if skippy < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if (iNdEx + skippy) < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if (iNdEx + skippy) > l {
+				return io.ErrUnexpectedEOF
+			}
+			iNdEx += skippy
+		}
+	}
+
+	if iNdEx > l {
+		return io.ErrUnexpectedEOF
+	}
+	return nil
+}
+func (m *CommonSpec) Unmarshal(dAtA []byte) error {
+	l := len(dAtA)
+	iNdEx := 0
+	for iNdEx < l {
+		preIndex := iNdEx
+		var wire uint64
+		for shift := uint(0); ; shift += 7 {
+			if shift >= 64 {
+				return ErrIntOverflowGenerated
+			}
+			if iNdEx >= l {
+				return io.ErrUnexpectedEOF
+			}
+			b := dAtA[iNdEx]
+			iNdEx++
+			wire |= uint64(b&0x7F) << shift
+			if b < 0x80 {
+				break
+			}
+		}
+		fieldNum := int32(wire >> 3)
+		wireType := int(wire & 0x7)
+		if wireType == 4 {
+			return fmt.Errorf("proto: CommonSpec: wiretype end group for non-group")
+		}
+		if fieldNum <= 0 {
+			return fmt.Errorf("proto: CommonSpec: illegal tag %d (wire type %d)", fieldNum, wire)
+		}
+		switch fieldNum {
+		case 1:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field ServiceAccount", wireType)
+			}
+			var stringLen uint64
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				stringLen |= uint64(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			intStringLen := int(stringLen)
+			if intStringLen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + intStringLen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.ServiceAccount = string(dAtA[iNdEx:postIndex])
+			iNdEx = postIndex
+		case 2:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Source", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			if err := m.Source.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			iNdEx = postIndex
+		case 3:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Revision", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			if m.Revision == nil {
+				m.Revision = &SourceRevision{}
+			}
+			if err := m.Revision.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			iNdEx = postIndex
+		case 4:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Strategy", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			if err := m.Strategy.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			iNdEx = postIndex
+		case 5:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Output", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			if err := m.Output.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			iNdEx = postIndex
+		case 6:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Resources", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			if err := m.Resources.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			iNdEx = postIndex
+		case 7:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field PostCommit", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			if err := m.PostCommit.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			iNdEx = postIndex
+		case 8:
+			if wireType != 0 {
+				return fmt.Errorf("proto: wrong wireType = %d for field CompletionDeadlineSeconds", wireType)
+			}
+			var v int64
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				v |= int64(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			m.CompletionDeadlineSeconds = &v
+		case 9:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field NodeSelector", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			if m.NodeSelector == nil {
+				m.NodeSelector = OptionalNodeSelector{}
+			}
+			if err := m.NodeSelector.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			iNdEx = postIndex
+		default:
+			iNdEx = preIndex
+			skippy, err := skipGenerated(dAtA[iNdEx:])
+			if err != nil {
+				return err
+			}
+			if skippy < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if (iNdEx + skippy) < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if (iNdEx + skippy) > l {
+				return io.ErrUnexpectedEOF
+			}
+			iNdEx += skippy
+		}
+	}
+
+	if iNdEx > l {
+		return io.ErrUnexpectedEOF
+	}
+	return nil
+}
+func (m *CommonWebHookCause) Unmarshal(dAtA []byte) error {
+	l := len(dAtA)
+	iNdEx := 0
+	for iNdEx < l {
+		preIndex := iNdEx
+		var wire uint64
+		for shift := uint(0); ; shift += 7 {
+			if shift >= 64 {
+				return ErrIntOverflowGenerated
+			}
+			if iNdEx >= l {
+				return io.ErrUnexpectedEOF
+			}
+			b := dAtA[iNdEx]
+			iNdEx++
+			wire |= uint64(b&0x7F) << shift
+			if b < 0x80 {
+				break
+			}
+		}
+		fieldNum := int32(wire >> 3)
+		wireType := int(wire & 0x7)
+		if wireType == 4 {
+			return fmt.Errorf("proto: CommonWebHookCause: wiretype end group for non-group")
+		}
+		if fieldNum <= 0 {
+			return fmt.Errorf("proto: CommonWebHookCause: illegal tag %d (wire type %d)", fieldNum, wire)
+		}
+		switch fieldNum {
+		case 1:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Revision", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			if m.Revision == nil {
+				m.Revision = &SourceRevision{}
+			}
+			if err := m.Revision.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			iNdEx = postIndex
+		case 2:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Secret", wireType)
+			}
+			var stringLen uint64
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				stringLen |= uint64(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			intStringLen := int(stringLen)
+			if intStringLen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + intStringLen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.Secret = string(dAtA[iNdEx:postIndex])
+			iNdEx = postIndex
+		default:
+			iNdEx = preIndex
+			skippy, err := skipGenerated(dAtA[iNdEx:])
+			if err != nil {
+				return err
+			}
+			if skippy < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if (iNdEx + skippy) < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if (iNdEx + skippy) > l {
+				return io.ErrUnexpectedEOF
+			}
+			iNdEx += skippy
+		}
+	}
+
+	if iNdEx > l {
+		return io.ErrUnexpectedEOF
+	}
+	return nil
+}
+func (m *ConfigMapBuildSource) Unmarshal(dAtA []byte) error {
+	l := len(dAtA)
+	iNdEx := 0
+	for iNdEx < l {
+		preIndex := iNdEx
+		var wire uint64
+		for shift := uint(0); ; shift += 7 {
+			if shift >= 64 {
+				return ErrIntOverflowGenerated
+			}
+			if iNdEx >= l {
+				return io.ErrUnexpectedEOF
+			}
+			b := dAtA[iNdEx]
+			iNdEx++
+			wire |= uint64(b&0x7F) << shift
+			if b < 0x80 {
+				break
+			}
+		}
+		fieldNum := int32(wire >> 3)
+		wireType := int(wire & 0x7)
+		if wireType == 4 {
+			return fmt.Errorf("proto: ConfigMapBuildSource: wiretype end group for non-group")
+		}
+		if fieldNum <= 0 {
+			return fmt.Errorf("proto: ConfigMapBuildSource: illegal tag %d (wire type %d)", fieldNum, wire)
+		}
+		switch fieldNum {
+		case 1:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field ConfigMap", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			if err := m.ConfigMap.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			iNdEx = postIndex
+		case 2:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field DestinationDir", wireType)
+			}
+			var stringLen uint64
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				stringLen |= uint64(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			intStringLen := int(stringLen)
+			if intStringLen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + intStringLen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.DestinationDir = string(dAtA[iNdEx:postIndex])
+			iNdEx = postIndex
+		default:
+			iNdEx = preIndex
+			skippy, err := skipGenerated(dAtA[iNdEx:])
+			if err != nil {
+				return err
+			}
+			if skippy < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if (iNdEx + skippy) < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if (iNdEx + skippy) > l {
+				return io.ErrUnexpectedEOF
+			}
+			iNdEx += skippy
+		}
+	}
+
+	if iNdEx > l {
+		return io.ErrUnexpectedEOF
+	}
+	return nil
+}
+func (m *CustomBuildStrategy) Unmarshal(dAtA []byte) error {
+	l := len(dAtA)
+	iNdEx := 0
+	for iNdEx < l {
+		preIndex := iNdEx
+		var wire uint64
+		for shift := uint(0); ; shift += 7 {
+			if shift >= 64 {
+				return ErrIntOverflowGenerated
+			}
+			if iNdEx >= l {
+				return io.ErrUnexpectedEOF
+			}
+			b := dAtA[iNdEx]
+			iNdEx++
+			wire |= uint64(b&0x7F) << shift
+			if b < 0x80 {
+				break
+			}
+		}
+		fieldNum := int32(wire >> 3)
+		wireType := int(wire & 0x7)
+		if wireType == 4 {
+			return fmt.Errorf("proto: CustomBuildStrategy: wiretype end group for non-group")
+		}
+		if fieldNum <= 0 {
+			return fmt.Errorf("proto: CustomBuildStrategy: illegal tag %d (wire type %d)", fieldNum, wire)
+		}
+		switch fieldNum {
+		case 1:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field From", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			if err := m.From.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			iNdEx = postIndex
+		case 2:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field PullSecret", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			if m.PullSecret == nil {
+				m.PullSecret = &v11.LocalObjectReference{}
+			}
+			if err := m.PullSecret.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			iNdEx = postIndex
+		case 3:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Env", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.Env = append(m.Env, v11.EnvVar{})
+			if err := m.Env[len(m.Env)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			iNdEx = postIndex
+		case 4:
+			if wireType != 0 {
+				return fmt.Errorf("proto: wrong wireType = %d for field ExposeDockerSocket", wireType)
+			}
+			var v int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				v |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			m.ExposeDockerSocket = bool(v != 0)
+		case 5:
+			if wireType != 0 {
+				return fmt.Errorf("proto: wrong wireType = %d for field ForcePull", wireType)
+			}
+			var v int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				v |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			m.ForcePull = bool(v != 0)
+		case 6:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Secrets", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.Secrets = append(m.Secrets, SecretSpec{})
+			if err := m.Secrets[len(m.Secrets)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			iNdEx = postIndex
+		case 7:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field BuildAPIVersion", wireType)
+			}
+			var stringLen uint64
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				stringLen |= uint64(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			intStringLen := int(stringLen)
+			if intStringLen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + intStringLen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.BuildAPIVersion = string(dAtA[iNdEx:postIndex])
+			iNdEx = postIndex
+		default:
+			iNdEx = preIndex
+			skippy, err := skipGenerated(dAtA[iNdEx:])
+			if err != nil {
+				return err
+			}
+			if skippy < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if (iNdEx + skippy) < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if (iNdEx + skippy) > l {
+				return io.ErrUnexpectedEOF
+			}
+			iNdEx += skippy
+		}
+	}
+
+	if iNdEx > l {
+		return io.ErrUnexpectedEOF
+	}
+	return nil
+}
+func (m *DockerBuildStrategy) Unmarshal(dAtA []byte) error {
+	l := len(dAtA)
+	iNdEx := 0
+	for iNdEx < l {
+		preIndex := iNdEx
+		var wire uint64
+		for shift := uint(0); ; shift += 7 {
+			if shift >= 64 {
+				return ErrIntOverflowGenerated
+			}
+			if iNdEx >= l {
+				return io.ErrUnexpectedEOF
+			}
+			b := dAtA[iNdEx]
+			iNdEx++
+			wire |= uint64(b&0x7F) << shift
+			if b < 0x80 {
+				break
+			}
+		}
+		fieldNum := int32(wire >> 3)
+		wireType := int(wire & 0x7)
+		if wireType == 4 {
+			return fmt.Errorf("proto: DockerBuildStrategy: wiretype end group for non-group")
+		}
+		if fieldNum <= 0 {
+			return fmt.Errorf("proto: DockerBuildStrategy: illegal tag %d (wire type %d)", fieldNum, wire)
+		}
+		switch fieldNum {
+		case 1:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field From", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			if m.From == nil {
+				m.From = &v11.ObjectReference{}
+			}
+			if err := m.From.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			iNdEx = postIndex
+		case 2:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field PullSecret", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			if m.PullSecret == nil {
+				m.PullSecret = &v11.LocalObjectReference{}
+			}
+			if err := m.PullSecret.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			iNdEx = postIndex
+		case 3:
+			if wireType != 0 {
+				return fmt.Errorf("proto: wrong wireType = %d for field NoCache", wireType)
+			}
+			var v int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				v |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			m.NoCache = bool(v != 0)
+		case 4:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Env", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.Env = append(m.Env, v11.EnvVar{})
+			if err := m.Env[len(m.Env)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			iNdEx = postIndex
+		case 5:
+			if wireType != 0 {
+				return fmt.Errorf("proto: wrong wireType = %d for field ForcePull", wireType)
+			}
+			var v int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				v |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			m.ForcePull = bool(v != 0)
+		case 6:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field DockerfilePath", wireType)
+			}
+			var stringLen uint64
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				stringLen |= uint64(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			intStringLen := int(stringLen)
+			if intStringLen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + intStringLen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.DockerfilePath = string(dAtA[iNdEx:postIndex])
+			iNdEx = postIndex
+		case 7:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field BuildArgs", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.BuildArgs = append(m.BuildArgs, v11.EnvVar{})
+			if err := m.BuildArgs[len(m.BuildArgs)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			iNdEx = postIndex
+		case 8:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field ImageOptimizationPolicy", wireType)
+			}
+			var stringLen uint64
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				stringLen |= uint64(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			intStringLen := int(stringLen)
+			if intStringLen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + intStringLen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			s := ImageOptimizationPolicy(dAtA[iNdEx:postIndex])
+			m.ImageOptimizationPolicy = &s
+			iNdEx = postIndex
+		default:
+			iNdEx = preIndex
+			skippy, err := skipGenerated(dAtA[iNdEx:])
+			if err != nil {
+				return err
+			}
+			if skippy < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if (iNdEx + skippy) < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if (iNdEx + skippy) > l {
+				return io.ErrUnexpectedEOF
+			}
+			iNdEx += skippy
+		}
+	}
+
+	if iNdEx > l {
+		return io.ErrUnexpectedEOF
+	}
+	return nil
+}
+func (m *DockerStrategyOptions) Unmarshal(dAtA []byte) error {
+	l := len(dAtA)
+	iNdEx := 0
+	for iNdEx < l {
+		preIndex := iNdEx
+		var wire uint64
+		for shift := uint(0); ; shift += 7 {
+			if shift >= 64 {
+				return ErrIntOverflowGenerated
+			}
+			if iNdEx >= l {
+				return io.ErrUnexpectedEOF
+			}
+			b := dAtA[iNdEx]
+			iNdEx++
+			wire |= uint64(b&0x7F) << shift
+			if b < 0x80 {
+				break
+			}
+		}
+		fieldNum := int32(wire >> 3)
+		wireType := int(wire & 0x7)
+		if wireType == 4 {
+			return fmt.Errorf("proto: DockerStrategyOptions: wiretype end group for non-group")
+		}
+		if fieldNum <= 0 {
+			return fmt.Errorf("proto: DockerStrategyOptions: illegal tag %d (wire type %d)", fieldNum, wire)
+		}
+		switch fieldNum {
+		case 1:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field BuildArgs", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.BuildArgs = append(m.BuildArgs, v11.EnvVar{})
+			if err := m.BuildArgs[len(m.BuildArgs)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			iNdEx = postIndex
+		case 2:
+			if wireType != 0 {
+				return fmt.Errorf("proto: wrong wireType = %d for field NoCache", wireType)
+			}
+			var v int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				v |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			b := bool(v != 0)
+			m.NoCache = &b
+		default:
+			iNdEx = preIndex
+			skippy, err := skipGenerated(dAtA[iNdEx:])
+			if err != nil {
+				return err
+			}
+			if skippy < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if (iNdEx + skippy) < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if (iNdEx + skippy) > l {
+				return io.ErrUnexpectedEOF
+			}
+			iNdEx += skippy
+		}
+	}
+
+	if iNdEx > l {
+		return io.ErrUnexpectedEOF
+	}
+	return nil
+}
+func (m *GenericWebHookCause) Unmarshal(dAtA []byte) error {
+	l := len(dAtA)
+	iNdEx := 0
+	for iNdEx < l {
+		preIndex := iNdEx
+		var wire uint64
+		for shift := uint(0); ; shift += 7 {
+			if shift >= 64 {
+				return ErrIntOverflowGenerated
+			}
+			if iNdEx >= l {
+				return io.ErrUnexpectedEOF
+			}
+			b := dAtA[iNdEx]
+			iNdEx++
+			wire |= uint64(b&0x7F) << shift
+			if b < 0x80 {
+				break
+			}
+		}
+		fieldNum := int32(wire >> 3)
+		wireType := int(wire & 0x7)
+		if wireType == 4 {
+			return fmt.Errorf("proto: GenericWebHookCause: wiretype end group for non-group")
+		}
+		if fieldNum <= 0 {
+			return fmt.Errorf("proto: GenericWebHookCause: illegal tag %d (wire type %d)", fieldNum, wire)
+		}
+		switch fieldNum {
+		case 1:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Revision", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			if m.Revision == nil {
+				m.Revision = &SourceRevision{}
+			}
+			if err := m.Revision.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			iNdEx = postIndex
+		case 2:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Secret", wireType)
+			}
+			var stringLen uint64
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				stringLen |= uint64(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			intStringLen := int(stringLen)
+			if intStringLen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + intStringLen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.Secret = string(dAtA[iNdEx:postIndex])
+			iNdEx = postIndex
+		default:
+			iNdEx = preIndex
+			skippy, err := skipGenerated(dAtA[iNdEx:])
+			if err != nil {
+				return err
+			}
+			if skippy < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if (iNdEx + skippy) < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if (iNdEx + skippy) > l {
+				return io.ErrUnexpectedEOF
+			}
+			iNdEx += skippy
+		}
+	}
+
+	if iNdEx > l {
+		return io.ErrUnexpectedEOF
+	}
+	return nil
+}
+func (m *GenericWebHookEvent) Unmarshal(dAtA []byte) error {
+	l := len(dAtA)
+	iNdEx := 0
+	for iNdEx < l {
+		preIndex := iNdEx
+		var wire uint64
+		for shift := uint(0); ; shift += 7 {
+			if shift >= 64 {
+				return ErrIntOverflowGenerated
+			}
+			if iNdEx >= l {
+				return io.ErrUnexpectedEOF
+			}
+			b := dAtA[iNdEx]
+			iNdEx++
+			wire |= uint64(b&0x7F) << shift
+			if b < 0x80 {
+				break
+			}
+		}
+		fieldNum := int32(wire >> 3)
+		wireType := int(wire & 0x7)
+		if wireType == 4 {
+			return fmt.Errorf("proto: GenericWebHookEvent: wiretype end group for non-group")
+		}
+		if fieldNum <= 0 {
+			return fmt.Errorf("proto: GenericWebHookEvent: illegal tag %d (wire type %d)", fieldNum, wire)
+		}
+		switch fieldNum {
+		case 1:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Type", wireType)
+			}
+			var stringLen uint64
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				stringLen |= uint64(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			intStringLen := int(stringLen)
+			if intStringLen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + intStringLen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.Type = BuildSourceType(dAtA[iNdEx:postIndex])
+			iNdEx = postIndex
+		case 2:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Git", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			if m.Git == nil {
+				m.Git = &GitInfo{}
+			}
+			if err := m.Git.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			iNdEx = postIndex
+		case 3:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Env", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.Env = append(m.Env, v11.EnvVar{})
+			if err := m.Env[len(m.Env)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			iNdEx = postIndex
+		case 4:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field DockerStrategyOptions", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			if m.DockerStrategyOptions == nil {
+				m.DockerStrategyOptions = &DockerStrategyOptions{}
+			}
+			if err := m.DockerStrategyOptions.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			iNdEx = postIndex
+		default:
+			iNdEx = preIndex
+			skippy, err := skipGenerated(dAtA[iNdEx:])
+			if err != nil {
+				return err
+			}
+			if skippy < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if (iNdEx + skippy) < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if (iNdEx + skippy) > l {
+				return io.ErrUnexpectedEOF
+			}
+			iNdEx += skippy
+		}
+	}
+
+	if iNdEx > l {
+		return io.ErrUnexpectedEOF
+	}
+	return nil
+}
+func (m *GitBuildSource) Unmarshal(dAtA []byte) error {
+	l := len(dAtA)
+	iNdEx := 0
+	for iNdEx < l {
+		preIndex := iNdEx
+		var wire uint64
+		for shift := uint(0); ; shift += 7 {
+			if shift >= 64 {
+				return ErrIntOverflowGenerated
+			}
+			if iNdEx >= l {
+				return io.ErrUnexpectedEOF
+			}
+			b := dAtA[iNdEx]
+			iNdEx++
+			wire |= uint64(b&0x7F) << shift
+			if b < 0x80 {
+				break
+			}
+		}
+		fieldNum := int32(wire >> 3)
+		wireType := int(wire & 0x7)
+		if wireType == 4 {
+			return fmt.Errorf("proto: GitBuildSource: wiretype end group for non-group")
+		}
+		if fieldNum <= 0 {
+			return fmt.Errorf("proto: GitBuildSource: illegal tag %d (wire type %d)", fieldNum, wire)
+		}
+		switch fieldNum {
+		case 1:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field URI", wireType)
+			}
+			var stringLen uint64
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				stringLen |= uint64(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			intStringLen := int(stringLen)
+			if intStringLen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + intStringLen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.URI = string(dAtA[iNdEx:postIndex])
+			iNdEx = postIndex
+		case 2:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Ref", wireType)
+			}
+			var stringLen uint64
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				stringLen |= uint64(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			intStringLen := int(stringLen)
+			if intStringLen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + intStringLen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.Ref = string(dAtA[iNdEx:postIndex])
+			iNdEx = postIndex
+		case 3:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field ProxyConfig", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			if err := m.ProxyConfig.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			iNdEx = postIndex
+		default:
+			iNdEx = preIndex
+			skippy, err := skipGenerated(dAtA[iNdEx:])
+			if err != nil {
+				return err
+			}
+			if skippy < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if (iNdEx + skippy) < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if (iNdEx + skippy) > l {
+				return io.ErrUnexpectedEOF
+			}
+			iNdEx += skippy
+		}
+	}
+
+	if iNdEx > l {
+		return io.ErrUnexpectedEOF
+	}
+	return nil
+}
+func (m *GitHubWebHookCause) Unmarshal(dAtA []byte) error {
+	l := len(dAtA)
+	iNdEx := 0
+	for iNdEx < l {
+		preIndex := iNdEx
+		var wire uint64
+		for shift := uint(0); ; shift += 7 {
+			if shift >= 64 {
+				return ErrIntOverflowGenerated
+			}
+			if iNdEx >= l {
+				return io.ErrUnexpectedEOF
+			}
+			b := dAtA[iNdEx]
+			iNdEx++
+			wire |= uint64(b&0x7F) << shift
+			if b < 0x80 {
+				break
+			}
+		}
+		fieldNum := int32(wire >> 3)
+		wireType := int(wire & 0x7)
+		if wireType == 4 {
+			return fmt.Errorf("proto: GitHubWebHookCause: wiretype end group for non-group")
+		}
+		if fieldNum <= 0 {
+			return fmt.Errorf("proto: GitHubWebHookCause: illegal tag %d (wire type %d)", fieldNum, wire)
+		}
+		switch fieldNum {
+		case 1:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Revision", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			if m.Revision == nil {
+				m.Revision = &SourceRevision{}
+			}
+			if err := m.Revision.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			iNdEx = postIndex
+		case 2:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Secret", wireType)
+			}
+			var stringLen uint64
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				stringLen |= uint64(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			intStringLen := int(stringLen)
+			if intStringLen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + intStringLen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.Secret = string(dAtA[iNdEx:postIndex])
+			iNdEx = postIndex
+		default:
+			iNdEx = preIndex
+			skippy, err := skipGenerated(dAtA[iNdEx:])
+			if err != nil {
+				return err
+			}
+			if skippy < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if (iNdEx + skippy) < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if (iNdEx + skippy) > l {
+				return io.ErrUnexpectedEOF
+			}
+			iNdEx += skippy
+		}
+	}
+
+	if iNdEx > l {
+		return io.ErrUnexpectedEOF
+	}
+	return nil
+}
+func (m *GitInfo) Unmarshal(dAtA []byte) error {
+	l := len(dAtA)
+	iNdEx := 0
+	for iNdEx < l {
+		preIndex := iNdEx
+		var wire uint64
+		for shift := uint(0); ; shift += 7 {
+			if shift >= 64 {
+				return ErrIntOverflowGenerated
+			}
+			if iNdEx >= l {
+				return io.ErrUnexpectedEOF
+			}
+			b := dAtA[iNdEx]
+			iNdEx++
+			wire |= uint64(b&0x7F) << shift
+			if b < 0x80 {
+				break
+			}
+		}
+		fieldNum := int32(wire >> 3)
+		wireType := int(wire & 0x7)
+		if wireType == 4 {
+			return fmt.Errorf("proto: GitInfo: wiretype end group for non-group")
+		}
+		if fieldNum <= 0 {
+			return fmt.Errorf("proto: GitInfo: illegal tag %d (wire type %d)", fieldNum, wire)
+		}
+		switch fieldNum {
+		case 1:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field GitBuildSource", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			if err := m.GitBuildSource.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			iNdEx = postIndex
+		case 2:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field GitSourceRevision", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			if err := m.GitSourceRevision.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			iNdEx = postIndex
+		case 3:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Refs", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.Refs = append(m.Refs, GitRefInfo{})
+			if err := m.Refs[len(m.Refs)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			iNdEx = postIndex
+		default:
+			iNdEx = preIndex
+			skippy, err := skipGenerated(dAtA[iNdEx:])
+			if err != nil {
+				return err
+			}
+			if skippy < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if (iNdEx + skippy) < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if (iNdEx + skippy) > l {
+				return io.ErrUnexpectedEOF
+			}
+			iNdEx += skippy
+		}
+	}
+
+	if iNdEx > l {
+		return io.ErrUnexpectedEOF
+	}
+	return nil
+}
+func (m *GitLabWebHookCause) Unmarshal(dAtA []byte) error {
+	l := len(dAtA)
+	iNdEx := 0
+	for iNdEx < l {
+		preIndex := iNdEx
+		var wire uint64
+		for shift := uint(0); ; shift += 7 {
+			if shift >= 64 {
+				return ErrIntOverflowGenerated
+			}
+			if iNdEx >= l {
+				return io.ErrUnexpectedEOF
+			}
+			b := dAtA[iNdEx]
+			iNdEx++
+			wire |= uint64(b&0x7F) << shift
+			if b < 0x80 {
+				break
+			}
+		}
+		fieldNum := int32(wire >> 3)
+		wireType := int(wire & 0x7)
+		if wireType == 4 {
+			return fmt.Errorf("proto: GitLabWebHookCause: wiretype end group for non-group")
+		}
+		if fieldNum <= 0 {
+			return fmt.Errorf("proto: GitLabWebHookCause: illegal tag %d (wire type %d)", fieldNum, wire)
+		}
+		switch fieldNum {
+		case 1:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field CommonWebHookCause", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			if err := m.CommonWebHookCause.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			iNdEx = postIndex
+		default:
+			iNdEx = preIndex
+			skippy, err := skipGenerated(dAtA[iNdEx:])
+			if err != nil {
+				return err
+			}
+			if skippy < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if (iNdEx + skippy) < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if (iNdEx + skippy) > l {
+				return io.ErrUnexpectedEOF
+			}
+			iNdEx += skippy
+		}
+	}
+
+	if iNdEx > l {
+		return io.ErrUnexpectedEOF
+	}
+	return nil
+}
+func (m *GitRefInfo) Unmarshal(dAtA []byte) error {
+	l := len(dAtA)
+	iNdEx := 0
+	for iNdEx < l {
+		preIndex := iNdEx
+		var wire uint64
+		for shift := uint(0); ; shift += 7 {
+			if shift >= 64 {
+				return ErrIntOverflowGenerated
+			}
+			if iNdEx >= l {
+				return io.ErrUnexpectedEOF
+			}
+			b := dAtA[iNdEx]
+			iNdEx++
+			wire |= uint64(b&0x7F) << shift
+			if b < 0x80 {
+				break
+			}
+		}
+		fieldNum := int32(wire >> 3)
+		wireType := int(wire & 0x7)
+		if wireType == 4 {
+			return fmt.Errorf("proto: GitRefInfo: wiretype end group for non-group")
+		}
+		if fieldNum <= 0 {
+			return fmt.Errorf("proto: GitRefInfo: illegal tag %d (wire type %d)", fieldNum, wire)
+		}
+		switch fieldNum {
+		case 1:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field GitBuildSource", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			if err := m.GitBuildSource.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			iNdEx = postIndex
+		case 2:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field GitSourceRevision", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			if err := m.GitSourceRevision.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			iNdEx = postIndex
+		default:
+			iNdEx = preIndex
+			skippy, err := skipGenerated(dAtA[iNdEx:])
+			if err != nil {
+				return err
+			}
+			if skippy < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if (iNdEx + skippy) < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if (iNdEx + skippy) > l {
+				return io.ErrUnexpectedEOF
+			}
+			iNdEx += skippy
+		}
+	}
+
+	if iNdEx > l {
+		return io.ErrUnexpectedEOF
+	}
+	return nil
+}
+func (m *GitSourceRevision) Unmarshal(dAtA []byte) error {
+	l := len(dAtA)
+	iNdEx := 0
+	for iNdEx < l {
+		preIndex := iNdEx
+		var wire uint64
+		for shift := uint(0); ; shift += 7 {
+			if shift >= 64 {
+				return ErrIntOverflowGenerated
+			}
+			if iNdEx >= l {
+				return io.ErrUnexpectedEOF
+			}
+			b := dAtA[iNdEx]
+			iNdEx++
+			wire |= uint64(b&0x7F) << shift
+			if b < 0x80 {
+				break
+			}
+		}
+		fieldNum := int32(wire >> 3)
+		wireType := int(wire & 0x7)
+		if wireType == 4 {
+			return fmt.Errorf("proto: GitSourceRevision: wiretype end group for non-group")
+		}
+		if fieldNum <= 0 {
+			return fmt.Errorf("proto: GitSourceRevision: illegal tag %d (wire type %d)", fieldNum, wire)
+		}
+		switch fieldNum {
+		case 1:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Commit", wireType)
+			}
+			var stringLen uint64
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				stringLen |= uint64(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			intStringLen := int(stringLen)
+			if intStringLen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + intStringLen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.Commit = string(dAtA[iNdEx:postIndex])
+			iNdEx = postIndex
+		case 2:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Author", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			if err := m.Author.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			iNdEx = postIndex
+		case 3:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Committer", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			if err := m.Committer.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			iNdEx = postIndex
+		case 4:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Message", wireType)
+			}
+			var stringLen uint64
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				stringLen |= uint64(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			intStringLen := int(stringLen)
+			if intStringLen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + intStringLen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.Message = string(dAtA[iNdEx:postIndex])
+			iNdEx = postIndex
+		default:
+			iNdEx = preIndex
+			skippy, err := skipGenerated(dAtA[iNdEx:])
+			if err != nil {
+				return err
+			}
+			if skippy < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if (iNdEx + skippy) < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if (iNdEx + skippy) > l {
+				return io.ErrUnexpectedEOF
+			}
+			iNdEx += skippy
+		}
+	}
+
+	if iNdEx > l {
+		return io.ErrUnexpectedEOF
+	}
+	return nil
+}
+func (m *ImageChangeCause) Unmarshal(dAtA []byte) error {
+	l := len(dAtA)
+	iNdEx := 0
+	for iNdEx < l {
+		preIndex := iNdEx
+		var wire uint64
+		for shift := uint(0); ; shift += 7 {
+			if shift >= 64 {
+				return ErrIntOverflowGenerated
+			}
+			if iNdEx >= l {
+				return io.ErrUnexpectedEOF
+			}
+			b := dAtA[iNdEx]
+			iNdEx++
+			wire |= uint64(b&0x7F) << shift
+			if b < 0x80 {
+				break
+			}
+		}
+		fieldNum := int32(wire >> 3)
+		wireType := int(wire & 0x7)
+		if wireType == 4 {
+			return fmt.Errorf("proto: ImageChangeCause: wiretype end group for non-group")
+		}
+		if fieldNum <= 0 {
+			return fmt.Errorf("proto: ImageChangeCause: illegal tag %d (wire type %d)", fieldNum, wire)
+		}
+		switch fieldNum {
+		case 1:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field ImageID", wireType)
+			}
+			var stringLen uint64
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				stringLen |= uint64(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			intStringLen := int(stringLen)
+			if intStringLen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + intStringLen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.ImageID = string(dAtA[iNdEx:postIndex])
+			iNdEx = postIndex
+		case 2:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field FromRef", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			if m.FromRef == nil {
+				m.FromRef = &v11.ObjectReference{}
+			}
+			if err := m.FromRef.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			iNdEx = postIndex
+		default:
+			iNdEx = preIndex
+			skippy, err := skipGenerated(dAtA[iNdEx:])
+			if err != nil {
+				return err
+			}
+			if skippy < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if (iNdEx + skippy) < 0 {
+				return ErrInvalidLengthGenerated
ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ImageChangeTrigger) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ImageChangeTrigger: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ImageChangeTrigger: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field LastTriggeredImageID", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.LastTriggeredImageID = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field From", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.From == nil { + m.From = &v11.ObjectReference{} + } + if err := m.From.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Paused", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.Paused = bool(v != 0) + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ImageLabel) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if 
wireType == 4 { + return fmt.Errorf("proto: ImageLabel: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ImageLabel: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Name = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Value", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Value = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ImageSource) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ImageSource: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ImageSource: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field From", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.From.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Paths", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 
ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Paths = append(m.Paths, ImageSourcePath{}) + if err := m.Paths[len(m.Paths)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field PullSecret", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.PullSecret == nil { + m.PullSecret = &v11.LocalObjectReference{} + } + if err := m.PullSecret.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field As", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.As = append(m.As, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ImageSourcePath) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ImageSourcePath: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ImageSourcePath: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field SourcePath", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } 
+ postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.SourcePath = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field DestinationDir", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.DestinationDir = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *JenkinsPipelineBuildStrategy) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: JenkinsPipelineBuildStrategy: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: JenkinsPipelineBuildStrategy: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field JenkinsfilePath", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.JenkinsfilePath = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Jenkinsfile", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Jenkinsfile = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Env", wireType) + 
} + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Env = append(m.Env, v11.EnvVar{}) + if err := m.Env[len(m.Env)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *OptionalNodeSelector) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: OptionalNodeSelector: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: OptionalNodeSelector: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if *m == nil { + *m = make(map[string]string) + } + var mapkey string + var mapvalue string + for iNdEx < postIndex { + entryPreIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + if fieldNum == 1 { + var stringLenmapkey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapkey |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapkey := int(stringLenmapkey) + if intStringLenmapkey < 0 { + return ErrInvalidLengthGenerated + } + postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey < 0 { + return ErrInvalidLengthGenerated + } + if postStringIndexmapkey > l { + return io.ErrUnexpectedEOF + } + mapkey = string(dAtA[iNdEx:postStringIndexmapkey]) + iNdEx = postStringIndexmapkey + } else if fieldNum == 2 { + var stringLenmapvalue uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated 
+ } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapvalue |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapvalue := int(stringLenmapvalue) + if intStringLenmapvalue < 0 { + return ErrInvalidLengthGenerated + } + postStringIndexmapvalue := iNdEx + intStringLenmapvalue + if postStringIndexmapvalue < 0 { + return ErrInvalidLengthGenerated + } + if postStringIndexmapvalue > l { + return io.ErrUnexpectedEOF + } + mapvalue = string(dAtA[iNdEx:postStringIndexmapvalue]) + iNdEx = postStringIndexmapvalue + } else { + iNdEx = entryPreIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > postIndex { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + (*m)[mapkey] = mapvalue + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ProxyConfig) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ProxyConfig: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ProxyConfig: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field HTTPProxy", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + s := string(dAtA[iNdEx:postIndex]) + m.HTTPProxy = &s + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field HTTPSProxy", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + s := string(dAtA[iNdEx:postIndex]) + m.HTTPSProxy = &s + iNdEx = postIndex + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field NoProxy", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift 
>= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + s := string(dAtA[iNdEx:postIndex]) + m.NoProxy = &s + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *SecretBuildSource) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: SecretBuildSource: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: SecretBuildSource: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Secret", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Secret.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field DestinationDir", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.DestinationDir = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *SecretLocalReference) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 
7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: SecretLocalReference: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: SecretLocalReference: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Name = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *SecretSpec) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: SecretSpec: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: SecretSpec: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field SecretSource", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.SecretSource.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field MountPath", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + 
return io.ErrUnexpectedEOF + } + m.MountPath = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *SourceBuildStrategy) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: SourceBuildStrategy: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: SourceBuildStrategy: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field From", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.From.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field PullSecret", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.PullSecret == nil { + m.PullSecret = &v11.LocalObjectReference{} + } + if err := m.PullSecret.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Env", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Env = append(m.Env, v11.EnvVar{}) + if err := m.Env[len(m.Env)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Scripts", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 
ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Scripts = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 5: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Incremental", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + b := bool(v != 0) + m.Incremental = &b + case 6: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field ForcePull", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.ForcePull = bool(v != 0) + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *SourceControlUser) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: SourceControlUser: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: SourceControlUser: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Name = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Email", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + 
intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Email = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *SourceRevision) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: SourceRevision: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: SourceRevision: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Type", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Type = BuildSourceType(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Git", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Git == nil { + m.Git = &GitSourceRevision{} + } + if err := m.Git.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *SourceStrategyOptions) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return 
fmt.Errorf("proto: SourceStrategyOptions: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: SourceStrategyOptions: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Incremental", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + b := bool(v != 0) + m.Incremental = &b + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *StageInfo) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: StageInfo: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: StageInfo: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Name = StageName(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field StartTime", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.StartTime.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field DurationMilliseconds", wireType) + } + m.DurationMilliseconds = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.DurationMilliseconds |= int64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d 
for field Steps", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Steps = append(m.Steps, StepInfo{}) + if err := m.Steps[len(m.Steps)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *StepInfo) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: StepInfo: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: StepInfo: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Name = StepName(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field StartTime", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.StartTime.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field DurationMilliseconds", wireType) + } + m.DurationMilliseconds = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.DurationMilliseconds |= int64(b&0x7F) << shift + if b < 0x80 { + break + } + } + default: + iNdEx = preIndex + skippy, err := 
skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *WebHookTrigger) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: WebHookTrigger: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: WebHookTrigger: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Secret", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Secret = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field AllowEnv", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.AllowEnv = bool(v != 0) + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field SecretReference", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.SecretReference == nil { + m.SecretReference = &SecretLocalReference{} + } + if err := m.SecretReference.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func skipGenerated(dAtA []byte) (n int, err error) { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowGenerated + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := 
dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + wireType := int(wire & 0x7) + switch wireType { + case 0: + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowGenerated + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + iNdEx++ + if dAtA[iNdEx-1] < 0x80 { + break + } + } + return iNdEx, nil + case 1: + iNdEx += 8 + return iNdEx, nil + case 2: + var length int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowGenerated + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + length |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if length < 0 { + return 0, ErrInvalidLengthGenerated + } + iNdEx += length + if iNdEx < 0 { + return 0, ErrInvalidLengthGenerated + } + return iNdEx, nil + case 3: + for { + var innerWire uint64 + var start int = iNdEx + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowGenerated + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + innerWire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + innerWireType := int(innerWire & 0x7) + if innerWireType == 4 { + break + } + next, err := skipGenerated(dAtA[start:]) + if err != nil { + return 0, err + } + iNdEx = start + next + if iNdEx < 0 { + return 0, ErrInvalidLengthGenerated + } + } + return iNdEx, nil + case 4: + return iNdEx, nil + case 5: + iNdEx += 4 + return iNdEx, nil + default: + return 0, fmt.Errorf("proto: illegal wireType %d", wireType) + } + } + panic("unreachable") +} + +var ( + ErrInvalidLengthGenerated = fmt.Errorf("proto: negative length found during unmarshaling") + ErrIntOverflowGenerated = fmt.Errorf("proto: integer overflow") +) diff --git a/vendor/github.com/openshift/api/build/v1/generated.proto b/vendor/github.com/openshift/api/build/v1/generated.proto new file mode 100644 index 0000000000000000000000000000000000000000..9a756bfeb1ca7407d1706357c37c4c6d5dc5d2cf --- /dev/null +++ b/vendor/github.com/openshift/api/build/v1/generated.proto @@ -0,0 +1,1047 @@ + +// This file was autogenerated by go-to-protobuf. Do not edit it manually! + +syntax = 'proto2'; + +package github.com.openshift.api.build.v1; + +import "k8s.io/api/core/v1/generated.proto"; +import "k8s.io/apimachinery/pkg/apis/meta/v1/generated.proto"; +import "k8s.io/apimachinery/pkg/runtime/schema/generated.proto"; + +// Package-wide variables from generator "generated". 
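Aside: every generated Unmarshal method above, and skipGenerated, opens with the same loop — read one varint key, then split it into a field number (upper bits) and a wire type (low three bits) to dispatch on. Below is a minimal standalone sketch of that pattern; the function and names are illustrative only, not part of the vendored code:

```go
package main

import (
	"errors"
	"fmt"
)

// decodeTag reads one protobuf key varint from b and splits it into the
// field number and wire type, mirroring the loop at the top of each
// generated Unmarshal method. It returns the number of bytes consumed.
func decodeTag(b []byte) (fieldNum int32, wireType int, n int, err error) {
	var wire uint64
	for shift := uint(0); ; shift += 7 {
		if shift >= 64 {
			// A varint never needs more than 10 bytes to encode 64 bits.
			return 0, 0, 0, errors.New("integer overflow")
		}
		if n >= len(b) {
			return 0, 0, 0, errors.New("unexpected EOF")
		}
		c := b[n]
		n++
		wire |= uint64(c&0x7F) << shift // low 7 bits carry payload
		if c < 0x80 {                   // high bit clear: final byte
			break
		}
	}
	return int32(wire >> 3), int(wire & 0x7), n, nil
}

func main() {
	// 0x1a == (field 3 << 3) | wire type 2 (length-delimited).
	f, wt, n, _ := decodeTag([]byte{0x1a})
	fmt.Println(f, wt, n) // prints: 3 2 1
}
```

The same defensive shape explains the repeated postIndex checks above: every length-delimited field is bounds-checked before slicing, so a truncated or malicious payload fails with io.ErrUnexpectedEOF or ErrInvalidLengthGenerated rather than reading out of range.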
+option go_package = "v1"; + +// BinaryBuildRequestOptions are the options required to fully speficy a binary build request +message BinaryBuildRequestOptions { + optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; + + // asFile determines if the binary should be created as a file within the source rather than extracted as an archive + optional string asFile = 2; + + // revision.commit is the value identifying a specific commit + optional string revisionCommit = 3; + + // revision.message is the description of a specific commit + optional string revisionMessage = 4; + + // revision.authorName of the source control user + optional string revisionAuthorName = 5; + + // revision.authorEmail of the source control user + optional string revisionAuthorEmail = 6; + + // revision.committerName of the source control user + optional string revisionCommitterName = 7; + + // revision.committerEmail of the source control user + optional string revisionCommitterEmail = 8; +} + +// BinaryBuildSource describes a binary file to be used for the Docker and Source build strategies, +// where the file will be extracted and used as the build source. +message BinaryBuildSource { + // asFile indicates that the provided binary input should be considered a single file + // within the build input. For example, specifying "webapp.war" would place the provided + // binary as `/webapp.war` for the builder. If left empty, the Docker and Source build + // strategies assume this file is a zip, tar, or tar.gz file and extract it as the source. + // The custom strategy receives this binary as standard input. This filename may not + // contain slashes or be '..' or '.'. + optional string asFile = 1; +} + +// BitbucketWebHookCause has information about a Bitbucket webhook that triggered a +// build. +message BitbucketWebHookCause { + optional CommonWebHookCause commonSpec = 1; +} + +// Build encapsulates the inputs needed to produce a new deployable image, as well as +// the status of the execution and a reference to the Pod which executed the build. +message Build { + optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; + + // spec is all the inputs used to execute the build. + optional BuildSpec spec = 2; + + // status is the current status of the build. + // +optional + optional BuildStatus status = 3; +} + +// BuildCondition describes the state of a build at a certain point. +message BuildCondition { + // Type of build condition. + optional string type = 1; + + // Status of the condition, one of True, False, Unknown. + optional string status = 2; + + // The last time this condition was updated. + optional k8s.io.apimachinery.pkg.apis.meta.v1.Time lastUpdateTime = 6; + + // The last time the condition transitioned from one status to another. + optional k8s.io.apimachinery.pkg.apis.meta.v1.Time lastTransitionTime = 3; + + // The reason for the condition's last transition. + optional string reason = 4; + + // A human readable message indicating details about the transition. + optional string message = 5; +} + +// Build configurations define a build process for new container images. There are three types of builds possible - a container image build using a Dockerfile, a Source-to-Image build that uses a specially prepared base image that accepts source code that it can make runnable, and a custom build that can run // arbitrary container images as a base and accept the build parameters. 
Builds run on the cluster and on completion are pushed to the container image registry specified in the "output" section. A build can be triggered via a webhook, when the base image changes, or when a user manually requests a new build be created. +// +// Each build created by a build configuration is numbered and refers back to its parent configuration. Multiple builds can be triggered at once. Builds that do not have "output" set can be used to test code or run a verification build. +message BuildConfig { + optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; + + // spec holds all the input necessary to produce a new build, and the conditions when + // to trigger them. + optional BuildConfigSpec spec = 2; + + // status holds any relevant information about a build config + // +optional + optional BuildConfigStatus status = 3; +} + +// BuildConfigList is a collection of BuildConfigs. +message BuildConfigList { + optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; + + // items is a list of build configs + repeated BuildConfig items = 2; +} + +// BuildConfigSpec describes when and how builds are created +message BuildConfigSpec { + // triggers determine how new Builds can be launched from a BuildConfig. If + // no triggers are defined, a new build can only occur as a result of an + // explicit client build creation. + repeated BuildTriggerPolicy triggers = 1; + + // RunPolicy describes how the new build created from this build + // configuration will be scheduled for execution. + // This is optional, if not specified we default to "Serial". + optional string runPolicy = 2; + + // CommonSpec is the desired build specification + optional CommonSpec commonSpec = 3; + + // successfulBuildsHistoryLimit is the number of old successful builds to retain. + // When a BuildConfig is created, the 5 most recent successful builds are retained unless this value is set. + // If removed after the BuildConfig has been created, all successful builds are retained. + optional int32 successfulBuildsHistoryLimit = 4; + + // failedBuildsHistoryLimit is the number of old failed builds to retain. + // When a BuildConfig is created, the 5 most recent failed builds are retained unless this value is set. + // If removed after the BuildConfig has been created, all failed builds are retained. + optional int32 failedBuildsHistoryLimit = 5; +} + +// BuildConfigStatus contains the current state of the build config object. +message BuildConfigStatus { + // lastVersion is used to inform about the number of the last triggered build. + optional int64 lastVersion = 1; +} + +// BuildList is a collection of Builds. +message BuildList { + optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; + + // items is a list of builds + repeated Build items = 2; +} + +// BuildLog is the (unused) resource associated with the build log redirector +message BuildLog { +} + +// BuildLogOptions is the REST options for a build log +message BuildLogOptions { + // container for which to stream logs. Defaults to only container if there is one container in the pod. + optional string container = 1; + + // follow if true indicates that the build log should be streamed until + // the build terminates. + optional bool follow = 2; + + // previous returns previous build logs. Defaults to false. + optional bool previous = 3; + + // sinceSeconds is a relative time in seconds before the current time from which to show logs.
+// BuildLogOptions is the REST options for a build log +message BuildLogOptions { + // container for which to stream logs. Defaults to only container if there is one container in the pod. + optional string container = 1; + + // follow if true indicates that the build log should be streamed until + // the build terminates. + optional bool follow = 2; + + // previous returns previous build logs. Defaults to false. + optional bool previous = 3; + + // sinceSeconds is a relative time in seconds before the current time from which to show logs. If this value + // precedes the time a pod was started, only logs since the pod start will be returned. + // If this value is in the future, no logs will be returned. + // Only one of sinceSeconds or sinceTime may be specified. + optional int64 sinceSeconds = 4; + + // sinceTime is an RFC3339 timestamp from which to show logs. If this value + // precedes the time a pod was started, only logs since the pod start will be returned. + // If this value is in the future, no logs will be returned. + // Only one of sinceSeconds or sinceTime may be specified. + optional k8s.io.apimachinery.pkg.apis.meta.v1.Time sinceTime = 5; + + // timestamps, if true, adds an RFC3339 or RFC3339Nano timestamp at the beginning of every line + // of log output. Defaults to false. + optional bool timestamps = 6; + + // tailLines, if set, is the number of lines from the end of the logs to show. If not specified, + // logs are shown from the creation of the container or sinceSeconds or sinceTime + optional int64 tailLines = 7; + + // limitBytes, if set, is the number of bytes to read from the server before terminating the + // log output. This may not display a complete final line of logging, and may return + // slightly more or slightly less than the specified limit. + optional int64 limitBytes = 8; + + // noWait if true causes the call to return immediately even if the build + // is not available yet. Otherwise the server will wait until the build has started. + // TODO: Fix the tag to 'noWait' in v2 + optional bool nowait = 9; + + // version of the build for which to view logs. + optional int64 version = 10; +} + +// BuildOutput is input to a build strategy and describes the container image that the strategy +// should produce. +message BuildOutput { + // to defines an optional location to push the output of this build to. + // Kind must be one of 'ImageStreamTag' or 'DockerImage'. + // This value will be used to look up a container image repository to push to. + // In the case of an ImageStreamTag, the ImageStreamTag will be looked for in the namespace of + // the build unless Namespace is specified. + optional k8s.io.api.core.v1.ObjectReference to = 1; + + // PushSecret is the name of a Secret that would be used for setting + // up the authentication for executing the Docker push to an + // authentication-enabled Docker registry (or Docker Hub). + optional k8s.io.api.core.v1.LocalObjectReference pushSecret = 2; + + // imageLabels define a list of labels that are applied to the resulting image. If there + // are multiple labels with the same name then the last one in the list is used. + repeated ImageLabel imageLabels = 3; +}
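As a usage sketch of the two messages above, this shows a BuildOutput pushing to an ImageStreamTag plus BuildLogOptions for following a build's log. Go field names are assumed to mirror the proto fields (the Go structs for these two types are outside this excerpt), and the secret and tag names are placeholders.

```go
package main

import (
	"fmt"

	corev1 "k8s.io/api/core/v1"

	buildv1 "github.com/openshift/api/build/v1"
)

func main() {
	out := buildv1.BuildOutput{
		// Kind must be "ImageStreamTag" or "DockerImage".
		To:         &corev1.ObjectReference{Kind: "ImageStreamTag", Name: "myapp:latest"},
		PushSecret: &corev1.LocalObjectReference{Name: "registry-push-secret"},
	}

	tail := int64(100)
	logOpts := buildv1.BuildLogOptions{
		Follow:    true,  // stream until the build terminates
		TailLines: &tail, // start from the last 100 lines
	}

	fmt.Println(out.To.Name, logOpts.Follow)
}
```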
+// A BuildPostCommitSpec holds a build post commit hook specification. The hook +// executes a command in a temporary container running the build output image, +// immediately after the last layer of the image is committed and before the +// image is pushed to a registry. The command is executed with the current +// working directory ($PWD) set to the image's WORKDIR. +// +// The build will be marked as failed if the hook execution fails. It will fail +// if the script or command returns a non-zero exit code, or if there is any +// other error related to starting the temporary container. +// +// There are five different ways to configure the hook. As an example, all forms +// below are equivalent and will execute `rake test --verbose`. +// +// 1. Shell script: +// +// "postCommit": { +// "script": "rake test --verbose" +// } +// +// The above is a convenient form which is equivalent to: +// +// "postCommit": { +// "command": ["/bin/sh", "-ic"], +// "args": ["rake test --verbose"] +// } +// +// 2. A command as the image entrypoint: +// +// "postCommit": { +// "command": ["rake", "test", "--verbose"] +// } +// +// Command overrides the image entrypoint in the exec form, as documented in +// Docker: https://docs.docker.com/engine/reference/builder/#entrypoint. +// +// 3. Pass arguments to the default entrypoint: +// +// "postCommit": { +// "args": ["rake", "test", "--verbose"] +// } +// +// This form is only useful if the image entrypoint can handle arguments. +// +// 4. Shell script with arguments: +// +// "postCommit": { +// "script": "rake test $1", +// "args": ["--verbose"] +// } +// +// This form is useful if you need to pass arguments that would otherwise be +// hard to quote properly in the shell script. In the script, $0 will be +// "/bin/sh" and $1, $2, etc., are the positional arguments from Args. +// +// 5. Command with arguments: +// +// "postCommit": { +// "command": ["rake", "test"], +// "args": ["--verbose"] +// } +// +// This form is equivalent to appending the arguments to the Command slice. +// +// It is invalid to provide both Script and Command simultaneously. If none of +// the fields are specified, the hook is not executed. +message BuildPostCommitSpec { + // command is the command to run. It may not be specified with Script. + // This might be needed if the image doesn't have `/bin/sh`, or if you + // do not want to use a shell. In all other cases, using Script might be + // more convenient. + repeated string command = 1; + + // args is a list of arguments that are provided to either Command, + // Script or the container image's default entrypoint. The arguments are + // placed immediately after the command to be run. + repeated string args = 2; + + // script is a shell script to be run with `/bin/sh -ic`. It may not be + // specified with Command. Use Script when a shell script is appropriate + // to execute the post build hook, for example for running unit tests + // with `rake test`. If you need control over the image entrypoint, or + // if the image does not have `/bin/sh`, use Command and/or Args. + // The `-i` flag is needed to support CentOS and RHEL images that use + // Software Collections (SCL), in order to have the appropriate + // collections enabled in the shell. E.g., in the Ruby image, this is + // necessary to make `ruby`, `bundle` and other binaries available in + // the PATH. + optional string script = 3; +}
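The five documented forms reduce to three fields. A quick sketch of form 4 (script with arguments), which the comment above singles out for tricky quoting, assuming the Go fields mirror the proto names:

```go
package main

import (
	"fmt"

	buildv1 "github.com/openshift/api/build/v1"
)

func main() {
	// Form 4: the script runs under `/bin/sh -ic`, so $1 expands to "--verbose".
	hook := buildv1.BuildPostCommitSpec{
		Script: "rake test $1",
		Args:   []string{"--verbose"},
	}
	// Setting Script and Command together would be rejected as invalid.
	fmt.Println(hook.Script, hook.Args)
}
```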
+// BuildRequest is the resource used to pass parameters to the build generator +message BuildRequest { + optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; + + // revision is the information from the source for a specific repo snapshot. + optional SourceRevision revision = 2; + + // triggeredByImage is the Image that triggered this build. + optional k8s.io.api.core.v1.ObjectReference triggeredByImage = 3; + + // from is the reference to the ImageStreamTag that triggered the build. + optional k8s.io.api.core.v1.ObjectReference from = 4; + + // binary indicates a request to build from a binary provided to the builder + optional BinaryBuildSource binary = 5; + + // lastVersion (optional) is the LastVersion of the BuildConfig that was used + // to generate the build. If the BuildConfig in the generator doesn't match, a build will + // not be generated. + optional int64 lastVersion = 6; + + // env contains additional environment variables you want to pass into a builder container. + repeated k8s.io.api.core.v1.EnvVar env = 7; + + // triggeredBy describes which triggers started the most recent update to the + // build configuration and contains information about those triggers. + repeated BuildTriggerCause triggeredBy = 8; + + // DockerStrategyOptions contains additional docker-strategy specific options for the build + optional DockerStrategyOptions dockerStrategyOptions = 9; + + // SourceStrategyOptions contains additional source-strategy specific options for the build + optional SourceStrategyOptions sourceStrategyOptions = 10; +} + +// BuildSource is the SCM used for the build. +message BuildSource { + // type of build input to accept + // +k8s:conversion-gen=false + optional string type = 1; + + // binary builds accept a binary as their input. The binary is generally assumed to be a tar, + // gzipped tar, or zip file depending on the strategy. For container image builds, this is the build + // context and an optional Dockerfile may be specified to override any Dockerfile in the + // build context. For Source builds, this is assumed to be an archive as described above. For + // Source and container image builds, if binary.asFile is set the build will receive a directory with + // a single file. contextDir may be used when an archive is provided. Custom builds will + // receive this binary as input on STDIN. + optional BinaryBuildSource binary = 2; + + // dockerfile is the raw contents of a Dockerfile which should be built. When this option is + // specified, the FROM may be modified based on your strategy base image and additional ENV + // stanzas from your strategy environment will be added after the FROM, but before the rest + // of your Dockerfile stanzas. The Dockerfile source type may be used with other options like + // git - in those cases the Git repo will have any innate Dockerfile replaced in the context + // dir. + optional string dockerfile = 3; + + // git contains optional information about git build source + optional GitBuildSource git = 4; + + // images describes a set of images to be used to provide source for the build + repeated ImageSource images = 5; + + // contextDir specifies the sub-directory where the source code for the application exists. + // This allows you to have buildable sources in a directory other than the root of the + // repository. + optional string contextDir = 6; + + // sourceSecret is the name of a Secret that would be used for setting + // up the authentication for cloning a private repository. + // The secret contains valid credentials for the remote repository, where the + // data's key represents the authentication method to be used and the value is + // the base64 encoded credentials. Supported auth methods are: ssh-privatekey. + optional k8s.io.api.core.v1.LocalObjectReference sourceSecret = 7; + + // secrets represents a list of secrets and their destinations that will + // be used only for the build. + repeated SecretBuildSource secrets = 8; + + // configMaps represents a list of configMaps and their destinations that will + // be used for the build.
+ repeated ConfigMapBuildSource configMaps = 9; +} + +// BuildSpec has the information to represent a build and also additional +// information about a build +message BuildSpec { + // CommonSpec is the information that represents a build + optional CommonSpec commonSpec = 1; + + // triggeredBy describes which triggers started the most recent update to the + // build configuration and contains information about those triggers. + repeated BuildTriggerCause triggeredBy = 2; +} + +// BuildStatus contains the status of a build +message BuildStatus { + // phase is the point in the build lifecycle. Possible values are + // "New", "Pending", "Running", "Complete", "Failed", "Error", and "Cancelled". + optional string phase = 1; + + // cancelled describes if a cancel event was triggered for the build. + optional bool cancelled = 2; + + // reason is a brief CamelCase string that describes any failure and is meant for machine parsing and tidy display in the CLI. + optional string reason = 3; + + // message is a human-readable message indicating details about why the build has this status. + optional string message = 4; + + // startTimestamp is a timestamp representing the server time when this Build started + // running in a Pod. + // It is represented in RFC3339 form and is in UTC. + optional k8s.io.apimachinery.pkg.apis.meta.v1.Time startTimestamp = 5; + + // completionTimestamp is a timestamp representing the server time when this Build was + // finished, whether that build failed or succeeded. It reflects the time at which + // the Pod running the Build terminated. + // It is represented in RFC3339 form and is in UTC. + optional k8s.io.apimachinery.pkg.apis.meta.v1.Time completionTimestamp = 6; + + // duration contains a time.Duration object describing the build time. + optional int64 duration = 7; + + // outputDockerImageReference contains a reference to the container image that + // will be built by this build. Its value is computed from + // Build.Spec.Output.To, and should include the registry address, so that + // it can be used to push and pull the image. + optional string outputDockerImageReference = 8; + + // config is an ObjectReference to the BuildConfig this Build is based on. + optional k8s.io.api.core.v1.ObjectReference config = 9; + + // output describes the container image the build has produced. + optional BuildStatusOutput output = 10; + + // stages contains details about each stage that occurs during the build + // including start time, duration (in milliseconds), and the steps that + // occurred within each stage. + repeated StageInfo stages = 11; + + // logSnippet is the last few lines of the build log. This value is only set for builds that failed. + optional string logSnippet = 12; + + // Conditions represents the latest available observations of a build's current state. + // +patchMergeKey=type + // +patchStrategy=merge + repeated BuildCondition conditions = 13; +} + +// BuildStatusOutput contains the status of the built image. +message BuildStatusOutput { + // to describes the status of the built image being pushed to a registry. + optional BuildStatusOutputTo to = 1; +}
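To make the BuildSource field interplay concrete, a hedged sketch of a git-based source: the repository URL, branch, directory, and secret name are placeholders, and the `GitBuildSource` Go field names are assumed to mirror the proto fields above (the `BuildSource` Go struct itself appears later in this diff).

```go
package main

import (
	"fmt"

	corev1 "k8s.io/api/core/v1"

	buildv1 "github.com/openshift/api/build/v1"
)

func main() {
	src := buildv1.BuildSource{
		Type: "Git",
		Git: &buildv1.GitBuildSource{
			URI: "https://example.com/org/app.git", // placeholder repository
			Ref: "main",
		},
		// Build from a subdirectory rather than the repository root.
		ContextDir:   "services/api",
		SourceSecret: &corev1.LocalObjectReference{Name: "scm-ssh-key"},
	}
	fmt.Println(src.Git.URI, src.ContextDir)
}
```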
+// BuildStatusOutputTo describes the status of the built image with regard to +// the image registry to which it was supposed to be pushed. +message BuildStatusOutputTo { + // imageDigest is the digest of the built container image. The digest uniquely + // identifies the image in the registry to which it was pushed. + // + // Please note that this field may not always be set even if the push + // completes successfully - e.g. when the registry returns no digest or + // returns it in a format that the builder doesn't understand. + optional string imageDigest = 1; +} + +// BuildStrategy contains the details of how to perform a build. +message BuildStrategy { + // type is the kind of build strategy. + // +k8s:conversion-gen=false + optional string type = 1; + + // dockerStrategy holds the parameters to the container image build strategy. + optional DockerBuildStrategy dockerStrategy = 2; + + // sourceStrategy holds the parameters to the Source build strategy. + optional SourceBuildStrategy sourceStrategy = 3; + + // customStrategy holds the parameters to the Custom build strategy + optional CustomBuildStrategy customStrategy = 4; + + // JenkinsPipelineStrategy holds the parameters to the Jenkins Pipeline build strategy. + // Deprecated: use OpenShift Pipelines + optional JenkinsPipelineBuildStrategy jenkinsPipelineStrategy = 5; +} + +// BuildTriggerCause holds information about a triggered build. It is used for +// displaying build trigger data for each build and build configuration in oc +// describe. It is also used to describe which triggers led to the most recent +// update in the build configuration. +message BuildTriggerCause { + // message is used to store a human readable message for why the build was + // triggered. E.g.: "Manually triggered by user", "Configuration change", etc. + optional string message = 1; + + // genericWebHook holds data about a build's generic webhook trigger. + optional GenericWebHookCause genericWebHook = 2; + + // gitHubWebHook represents data for a GitHub webhook that fired a + // specific build. + optional GitHubWebHookCause githubWebHook = 3; + + // imageChangeBuild stores information about an imagechange event + // that triggered a new build. + optional ImageChangeCause imageChangeBuild = 4; + + // GitLabWebHook represents data for a GitLab webhook that fired a specific + // build. + optional GitLabWebHookCause gitlabWebHook = 5; + + // BitbucketWebHook represents data for a Bitbucket webhook that fired a + // specific build. + optional BitbucketWebHookCause bitbucketWebHook = 6; +} + +// BuildTriggerPolicy describes a policy for a single trigger that results in a new Build. +message BuildTriggerPolicy { + // type is the type of build trigger + optional string type = 1; + + // github contains the parameters for a GitHub webhook type of trigger + optional WebHookTrigger github = 2; + + // generic contains the parameters for a Generic webhook type of trigger + optional WebHookTrigger generic = 3; + + // imageChange contains parameters for an ImageChange type of trigger + optional ImageChangeTrigger imageChange = 4; + + // GitLabWebHook contains the parameters for a GitLab webhook type of trigger + optional WebHookTrigger gitlab = 5; + + // BitbucketWebHook contains the parameters for a Bitbucket webhook type of + // trigger + optional WebHookTrigger bitbucket = 6; +} + +// CommonSpec encapsulates all the inputs necessary to represent a build. +message CommonSpec { + // serviceAccount is the name of the ServiceAccount to use to run the pod + // created by this build. + // The pod will be allowed to use secrets referenced by the ServiceAccount + optional string serviceAccount = 1; + + // source describes the SCM in use. + optional BuildSource source = 2; + + // revision is the information from the source for a specific repo snapshot. + // This is optional.
+ optional SourceRevision revision = 3; + + // strategy defines how to perform a build. + optional BuildStrategy strategy = 4; + + // output describes the container image the Strategy should produce. + optional BuildOutput output = 5; + + // resources computes resource requirements to execute the build. + optional k8s.io.api.core.v1.ResourceRequirements resources = 6; + + // postCommit is a build hook executed after the build output image is + // committed, before it is pushed to a registry. + optional BuildPostCommitSpec postCommit = 7; + + // completionDeadlineSeconds is an optional duration in seconds, counted from + // the time when a build pod gets scheduled in the system, that the build may + // be active on a node before the system actively tries to terminate the + // build; the value must be a positive integer + optional int64 completionDeadlineSeconds = 8; + + // nodeSelector is a selector which must be true for the build pod to fit on a node + // If nil, it can be overridden by default build nodeselector values for the cluster. + // If set to an empty map or a map with any values, default build nodeselector values + // are ignored. + // +optional + optional OptionalNodeSelector nodeSelector = 9; +} + +// CommonWebHookCause factors out the identical format of these webhook +// causes into a struct so we can share it in the specific causes; it is too late for +// GitHub and Generic but we can leverage this pattern with GitLab and Bitbucket. +message CommonWebHookCause { + // Revision is the git source revision information of the trigger. + optional SourceRevision revision = 1; + + // Secret is the obfuscated webhook secret that triggered a build. + optional string secret = 2; +} + +// ConfigMapBuildSource describes a configmap and its destination directory that will be +// used only at the build time. The content of the configmap referenced here will +// be copied into the destination directory instead of mounting. +message ConfigMapBuildSource { + // configMap is a reference to an existing configmap that you want to use in your + // build. + optional k8s.io.api.core.v1.LocalObjectReference configMap = 1; + + // destinationDir is the directory where the files from the configmap should be + // available for the build time. + // For the Source build strategy, these will be injected into a container + // where the assemble script runs. + // For the container image build strategy, these will be copied into the build + // directory, where the Dockerfile is located, so users can ADD or COPY them + // during container image build. + optional string destinationDir = 2; +} + +// CustomBuildStrategy defines input parameters specific to Custom build. +message CustomBuildStrategy { + // from is a reference to a DockerImage, ImageStreamTag, or ImageStreamImage from which + // the container image should be pulled + optional k8s.io.api.core.v1.ObjectReference from = 1; + + // pullSecret is the name of a Secret that would be used for setting up + // the authentication for pulling the container images from private Docker + // registries + optional k8s.io.api.core.v1.LocalObjectReference pullSecret = 2; + + // env contains additional environment variables you want to pass into a builder container. + repeated k8s.io.api.core.v1.EnvVar env = 3; + + // exposeDockerSocket will allow running Docker commands (and building container images) from + // inside the container.
+ // TODO: Allow admins to enforce 'false' for this option + optional bool exposeDockerSocket = 4; + + // forcePull describes if the controller should configure the build pod to always pull the images + // for the builder or only pull if it is not present locally + optional bool forcePull = 5; + + // secrets is a list of additional secrets that will be included in the build pod + repeated SecretSpec secrets = 6; + + // buildAPIVersion is the requested API version for the Build object serialized and passed to the custom builder + optional string buildAPIVersion = 7; +} + +// DockerBuildStrategy defines input parameters specific to container image build. +message DockerBuildStrategy { + // from is a reference to a DockerImage, ImageStreamTag, or ImageStreamImage which overrides + // the FROM image in the Dockerfile for the build. If the Dockerfile uses multi-stage builds, + // this will replace the image in the last FROM directive of the file. + optional k8s.io.api.core.v1.ObjectReference from = 1; + + // pullSecret is the name of a Secret that would be used for setting up + // the authentication for pulling the container images from private Docker + // registries + optional k8s.io.api.core.v1.LocalObjectReference pullSecret = 2; + + // noCache if set to true indicates that the container image build must be executed with the + // --no-cache=true flag + optional bool noCache = 3; + + // env contains additional environment variables you want to pass into a builder container. + repeated k8s.io.api.core.v1.EnvVar env = 4; + + // forcePull describes if the builder should pull the images from registry prior to building. + optional bool forcePull = 5; + + // dockerfilePath is the path of the Dockerfile that will be used to build the container image, + // relative to the root of the context (contextDir). + optional string dockerfilePath = 6; + + // buildArgs contains build arguments that will be resolved in the Dockerfile. See + // https://docs.docker.com/engine/reference/builder/#/arg for more details. + repeated k8s.io.api.core.v1.EnvVar buildArgs = 7; + + // imageOptimizationPolicy describes what optimizations the system can use when building images + // to reduce the final size or time spent building the image. The default policy is 'None' which + // means the final build image will be equivalent to an image created by the container image build API. + // The experimental policy 'SkipLayers' will avoid committing new layers in between each + // image step, and will fail if the Dockerfile cannot provide compatibility with the 'None' + // policy. An additional experimental policy 'SkipLayersAndWarn' is the same as + // 'SkipLayers' but simply warns if compatibility cannot be preserved. + optional string imageOptimizationPolicy = 8; +} + +// DockerStrategyOptions contains extra strategy options for container image builds +message DockerStrategyOptions { + // Args contains any build arguments that are to be passed to Docker. See + // https://docs.docker.com/engine/reference/builder/#/arg for more details + repeated k8s.io.api.core.v1.EnvVar buildArgs = 1; + + // noCache overrides the docker-strategy noCache option in the build config + optional bool noCache = 2; +} + +// GenericWebHookCause holds information about a generic WebHook that +// triggered a build. +message GenericWebHookCause { + // revision is an optional field that stores the git source revision + // information of the generic webhook trigger when it is available.
+ optional SourceRevision revision = 1; + + // secret is the obfuscated webhook secret that triggered a build. + optional string secret = 2; +} + +// GenericWebHookEvent is the payload expected for a generic webhook post +message GenericWebHookEvent { + // type is the type of source repository + // +k8s:conversion-gen=false + optional string type = 1; + + // git is the git information if the Type is BuildSourceGit + optional GitInfo git = 2; + + // env contains additional environment variables you want to pass into a builder container. + // ValueFrom is not supported. + repeated k8s.io.api.core.v1.EnvVar env = 3; + + // DockerStrategyOptions contains additional docker-strategy specific options for the build + optional DockerStrategyOptions dockerStrategyOptions = 4; +} + +// GitBuildSource defines the parameters of a Git SCM +message GitBuildSource { + // uri points to the source that will be built. The structure of the source + // will depend on the type of build to run + optional string uri = 1; + + // ref is the branch/tag/ref to build. + optional string ref = 2; + + // proxyConfig defines the proxies to use for the git clone operation. Values + // not set here are inherited from cluster-wide build git proxy settings. + optional ProxyConfig proxyConfig = 3; +} + +// GitHubWebHookCause has information about a GitHub webhook that triggered a +// build. +message GitHubWebHookCause { + // revision is the git revision information of the trigger. + optional SourceRevision revision = 1; + + // secret is the obfuscated webhook secret that triggered a build. + optional string secret = 2; +} + +// GitInfo is the aggregated git information for a generic webhook post +message GitInfo { + optional GitBuildSource gitBuildSource = 1; + + optional GitSourceRevision gitSourceRevision = 2; + + // Refs is a list of GitRefs for the provided repo - generally sent + // when used from a post-receive hook. This field is optional and is + // used when sending multiple refs + repeated GitRefInfo refs = 3; +} + +// GitLabWebHookCause has information about a GitLab webhook that triggered a +// build. +message GitLabWebHookCause { + optional CommonWebHookCause commonSpec = 1; +} + +// GitRefInfo is a single ref +message GitRefInfo { + optional GitBuildSource gitBuildSource = 1; + + optional GitSourceRevision gitSourceRevision = 2; +} + +// GitSourceRevision is the commit information from a git source for a build +message GitSourceRevision { + // commit is the commit hash identifying a specific commit + optional string commit = 1; + + // author is the author of a specific commit + optional SourceControlUser author = 2; + + // committer is the committer of a specific commit + optional SourceControlUser committer = 3; + + // message is the description of a specific commit + optional string message = 4; +} + +// ImageChangeCause contains information about the image that triggered a +// build +message ImageChangeCause { + // imageID is the ID of the image that triggered a new build. + optional string imageID = 1; + + // fromRef contains detailed information about an image that triggered a + // build.
+ optional k8s.io.api.core.v1.ObjectReference fromRef = 2; +} + +// ImageChangeTrigger allows builds to be triggered when an ImageStream changes +message ImageChangeTrigger { + // lastTriggeredImageID is used internally by the ImageChangeController to save the last + // used image ID for a build + optional string lastTriggeredImageID = 1; + + // from is a reference to an ImageStreamTag that will trigger a build when updated + // It is optional. If no From is specified, the From image from the build strategy + // will be used. Only one ImageChangeTrigger with an empty From reference is allowed in + // a build configuration. + optional k8s.io.api.core.v1.ObjectReference from = 2; + + // paused is true if this trigger is temporarily disabled. Optional. + optional bool paused = 3; +} + +// ImageLabel represents a label applied to the resulting image. +message ImageLabel { + // name defines the name of the label. It must have non-zero length. + optional string name = 1; + + // value defines the literal value of the label. + optional string value = 2; +} + +// ImageSource is used to describe build source that will be extracted from an image or used during a +// multi-stage build. A reference of type ImageStreamTag, ImageStreamImage or DockerImage may be used. +// A pull secret can be specified to pull the image from an external registry or override the default +// service account secret if pulling from the internal registry. Image sources can either be used to +// extract content from an image and place it into the build context along with the repository source, +// or used directly during a multi-stage container image build to allow content to be copied without overwriting +// the contents of the repository source (see the 'paths' and 'as' fields). +message ImageSource { + // from is a reference to an ImageStreamTag, ImageStreamImage, or DockerImage to + // copy source from. + optional k8s.io.api.core.v1.ObjectReference from = 1; + + // A list of image names that this source will be used in place of during a multi-stage container image + // build. For instance, a Dockerfile that uses "COPY --from=nginx:latest" will first check for an image + // source that has "nginx:latest" in this field before attempting to pull directly. If the Dockerfile + // does not reference an image source it is ignored. This field and paths may both be set, in which case + // the contents will be used twice. + // +optional + repeated string as = 4; + + // paths is a list of source and destination paths to copy from the image. This content will be copied + // into the build context prior to starting the build. If no paths are set, the build context will + // not be altered. + // +optional + repeated ImageSourcePath paths = 2; + + // pullSecret is a reference to a secret to be used to pull the image from a registry + // If the image is pulled from the OpenShift registry, this field does not need to be set. + optional k8s.io.api.core.v1.LocalObjectReference pullSecret = 3; +} + +// ImageSourcePath describes a path to be copied from a source image and its destination within the build directory. +message ImageSourcePath { + // sourcePath is the absolute path of the file or directory inside the image to + // copy to the build directory. If the source path ends in /. then the content of + // the directory will be copied, but the directory itself will not be created at the + // destination.
+ optional string sourcePath = 1; + + // destinationDir is the relative directory within the build directory + // where files copied from the image are placed. + optional string destinationDir = 2; +} + +// JenkinsPipelineBuildStrategy holds parameters specific to a Jenkins Pipeline build. +// Deprecated: use OpenShift Pipelines +message JenkinsPipelineBuildStrategy { + // JenkinsfilePath is the optional path of the Jenkinsfile that will be used to configure the pipeline + // relative to the root of the context (contextDir). If neither JenkinsfilePath nor Jenkinsfile is + // specified, this defaults to Jenkinsfile in the root of the specified contextDir. + optional string jenkinsfilePath = 1; + + // Jenkinsfile defines the optional raw contents of a Jenkinsfile which defines a Jenkins pipeline build. + optional string jenkinsfile = 2; + + // env contains additional environment variables you want to pass into a build pipeline. + repeated k8s.io.api.core.v1.EnvVar env = 3; +} + +// OptionalNodeSelector is a map that may also be left nil to distinguish between set and unset. +// +protobuf.nullable=true +// +protobuf.options.(gogoproto.goproto_stringer)=false +message OptionalNodeSelector { + // items, if empty, will result in an empty map + map<string, string> items = 1; +} + +// ProxyConfig defines what proxies to use for an operation +message ProxyConfig { + // httpProxy is a proxy used to reach the git repository over http + optional string httpProxy = 3; + + // httpsProxy is a proxy used to reach the git repository over https + optional string httpsProxy = 4; + + // noProxy is the list of domains for which the proxy should not be used + optional string noProxy = 5; +} + +// SecretBuildSource describes a secret and its destination directory that will be +// used only at the build time. The content of the secret referenced here will +// be copied into the destination directory instead of mounting. +message SecretBuildSource { + // secret is a reference to an existing secret that you want to use in your + // build. + optional k8s.io.api.core.v1.LocalObjectReference secret = 1; + + // destinationDir is the directory where the files from the secret should be + // available for the build time. + // For the Source build strategy, these will be injected into a container + // where the assemble script runs. Later, when the script finishes, all files + // injected will be truncated to zero length. + // For the container image build strategy, these will be copied into the build + // directory, where the Dockerfile is located, so users can ADD or COPY them + // during container image build. + optional string destinationDir = 2; +} + +// SecretLocalReference contains information that points to the local secret being used +message SecretLocalReference { + // Name is the name of the resource in the same namespace being referenced + optional string name = 1; +} + +// SecretSpec specifies a secret to be included in a build pod and its corresponding mount point +message SecretSpec { + // secretSource is a reference to the secret + optional k8s.io.api.core.v1.LocalObjectReference secretSource = 1; + + // mountPath is the path at which to mount the secret + optional string mountPath = 2; +}
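The nil-versus-empty distinction that `+protobuf.nullable=true` preserves for OptionalNodeSelector is easy to get wrong. A small sketch using the Go type (defined in types.go later in this diff) makes the difference visible:

```go
package main

import (
	"fmt"

	buildv1 "github.com/openshift/api/build/v1"
)

func main() {
	// nil (unset): cluster default build node selectors may still be applied.
	var unset buildv1.OptionalNodeSelector

	// Empty but set: the defaults are explicitly ignored.
	empty := buildv1.OptionalNodeSelector{}

	fmt.Println(unset == nil, empty == nil) // true false
}
```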
+// SourceBuildStrategy defines input parameters specific to a Source build. +message SourceBuildStrategy { + // from is a reference to a DockerImage, ImageStreamTag, or ImageStreamImage from which + // the container image should be pulled + optional k8s.io.api.core.v1.ObjectReference from = 1; + + // pullSecret is the name of a Secret that would be used for setting up + // the authentication for pulling the container images from private Docker + // registries + optional k8s.io.api.core.v1.LocalObjectReference pullSecret = 2; + + // env contains additional environment variables you want to pass into a builder container. + repeated k8s.io.api.core.v1.EnvVar env = 3; + + // scripts is the location of Source scripts + optional string scripts = 4; + + // incremental flag forces the Source build to do incremental builds if true. + optional bool incremental = 5; + + // forcePull describes if the builder should pull the images from registry prior to building. + optional bool forcePull = 6; +} + +// SourceControlUser defines the identity of a user of source control +message SourceControlUser { + // name of the source control user + optional string name = 1; + + // email of the source control user + optional string email = 2; +} + +// SourceRevision is the revision or commit information from the source for the build +message SourceRevision { + // type of the build source, may be one of 'Source', 'Dockerfile', 'Binary', or 'Images' + // +k8s:conversion-gen=false + optional string type = 1; + + // Git contains information about git-based build source + optional GitSourceRevision git = 2; +} + +// SourceStrategyOptions contains extra strategy options for Source builds +message SourceStrategyOptions { + // incremental overrides the source-strategy incremental option in the build config + optional bool incremental = 1; +} + +// StageInfo contains details about a build stage. +message StageInfo { + // name is a unique identifier for each build stage that occurs. + optional string name = 1; + + // startTime is a timestamp representing the server time when this Stage started. + // It is represented in RFC3339 form and is in UTC. + optional k8s.io.apimachinery.pkg.apis.meta.v1.Time startTime = 2; + + // durationMilliseconds identifies how long the stage took + // to complete in milliseconds. + // Note: the duration of a stage can exceed the sum of the duration of the steps within + // the stage as not all actions are accounted for in explicit build steps. + optional int64 durationMilliseconds = 3; + + // steps contains details about each step that occurs during a build stage + // including start time and duration in milliseconds. + repeated StepInfo steps = 4; +} + +// StepInfo contains details about a build step. +message StepInfo { + // name is a unique identifier for each build step. + optional string name = 1; + + // startTime is a timestamp representing the server time when this Step started. + // It is represented in RFC3339 form and is in UTC. + optional k8s.io.apimachinery.pkg.apis.meta.v1.Time startTime = 2; + + // durationMilliseconds identifies how long the step took + // to complete in milliseconds. + optional int64 durationMilliseconds = 3; +} + +// WebHookTrigger is a trigger that gets invoked using a webhook type of post +message WebHookTrigger { + // secret used to validate requests. + // Deprecated: use SecretReference instead. + optional string secret = 1; + + // allowEnv determines whether the webhook can set environment variables; can only + // be set to true for GenericWebHook.
+ optional bool allowEnv = 2; + + // secretReference is a reference to a secret in the same namespace, + // containing the value to be validated when the webhook is invoked. + // The secret being referenced must contain a key named "WebHookSecretKey", the value + // of which will be checked against the value supplied in the webhook invocation. + optional SecretLocalReference secretReference = 3; +} + diff --git a/vendor/github.com/openshift/api/build/v1/legacy.go b/vendor/github.com/openshift/api/build/v1/legacy.go new file mode 100644 index 0000000000000000000000000000000000000000..a74627d2cda08bd81777c553e15068a5179c4c4b --- /dev/null +++ b/vendor/github.com/openshift/api/build/v1/legacy.go @@ -0,0 +1,28 @@ +package v1 + +import ( + corev1 "k8s.io/api/core/v1" + runtime "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" +) + +var ( + legacyGroupVersion = schema.GroupVersion{Group: "", Version: "v1"} + legacySchemeBuilder = runtime.NewSchemeBuilder(addLegacyKnownTypes, corev1.AddToScheme) + DeprecatedInstallWithoutGroup = legacySchemeBuilder.AddToScheme +) + +func addLegacyKnownTypes(scheme *runtime.Scheme) error { + types := []runtime.Object{ + &Build{}, + &BuildList{}, + &BuildConfig{}, + &BuildConfigList{}, + &BuildLog{}, + &BuildRequest{}, + &BuildLogOptions{}, + &BinaryBuildRequestOptions{}, + } + scheme.AddKnownTypes(legacyGroupVersion, types...) + return nil +} diff --git a/vendor/github.com/openshift/api/build/v1/register.go b/vendor/github.com/openshift/api/build/v1/register.go new file mode 100644 index 0000000000000000000000000000000000000000..16f68ea8cd6d81a40dd97c82572265b039ce32c1 --- /dev/null +++ b/vendor/github.com/openshift/api/build/v1/register.go @@ -0,0 +1,47 @@ +package v1 + +import ( + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" +) + +var ( + GroupName = "build.openshift.io" + GroupVersion = schema.GroupVersion{Group: GroupName, Version: "v1"} + schemeBuilder = runtime.NewSchemeBuilder(addKnownTypes, corev1.AddToScheme) + // Install is a function which adds this version to a scheme + Install = schemeBuilder.AddToScheme + + // SchemeGroupVersion generated code relies on this name + // Deprecated + SchemeGroupVersion = GroupVersion + // AddToScheme exists solely to keep the old generators creating valid code + // DEPRECATED + AddToScheme = schemeBuilder.AddToScheme +) + +// Resource generated code relies on this being here, but it logically belongs to the group +// DEPRECATED +func Resource(resource string) schema.GroupResource { + return schema.GroupResource{Group: GroupName, Resource: resource} +} + +// addKnownTypes adds types to API group +func addKnownTypes(scheme *runtime.Scheme) error { + scheme.AddKnownTypes(GroupVersion, + &Build{}, + &BuildList{}, + &BuildConfig{}, + &BuildConfigList{}, + &BuildLog{}, + &BuildRequest{}, + &BuildLogOptions{}, + &BinaryBuildRequestOptions{}, + // This is needed for webhooks + &corev1.PodProxyOptions{}, + ) + metav1.AddToGroupVersion(scheme, GroupVersion) + return nil +}
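register.go above gives the package an `Install` helper; a minimal sketch of wiring these types into a `runtime.Scheme`:

```go
package main

import (
	"fmt"

	"k8s.io/apimachinery/pkg/runtime"

	buildv1 "github.com/openshift/api/build/v1"
)

func main() {
	scheme := runtime.NewScheme()
	// Install registers the build.openshift.io/v1 kinds listed in addKnownTypes,
	// plus corev1 types, via the scheme builder shown above.
	if err := buildv1.Install(scheme); err != nil {
		panic(err)
	}
	fmt.Println(scheme.Recognizes(buildv1.GroupVersion.WithKind("Build"))) // true
}
```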
"k8s.io/apimachinery/pkg/apis/meta/v1" +) + +// +genclient +// +genclient:method=UpdateDetails,verb=update,subresource=details +// +genclient:method=Clone,verb=create,subresource=clone,input=BuildRequest +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// Build encapsulates the inputs needed to produce a new deployable image, as well as +// the status of the execution and a reference to the Pod which executed the build. +type Build struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` + + // spec is all the inputs used to execute the build. + Spec BuildSpec `json:"spec,omitempty" protobuf:"bytes,2,opt,name=spec"` + + // status is the current status of the build. + // +optional + Status BuildStatus `json:"status,omitempty" protobuf:"bytes,3,opt,name=status"` +} + +// BuildSpec has the information to represent a build and also additional +// information about a build +type BuildSpec struct { + // CommonSpec is the information that represents a build + CommonSpec `json:",inline" protobuf:"bytes,1,opt,name=commonSpec"` + + // triggeredBy describes which triggers started the most recent update to the + // build configuration and contains information about those triggers. + TriggeredBy []BuildTriggerCause `json:"triggeredBy" protobuf:"bytes,2,rep,name=triggeredBy"` +} + +// OptionalNodeSelector is a map that may also be left nil to distinguish between set and unset. +// +protobuf.nullable=true +// +protobuf.options.(gogoproto.goproto_stringer)=false +type OptionalNodeSelector map[string]string + +func (t OptionalNodeSelector) String() string { + return fmt.Sprintf("%v", map[string]string(t)) +} + +// CommonSpec encapsulates all the inputs necessary to represent a build. +type CommonSpec struct { + // serviceAccount is the name of the ServiceAccount to use to run the pod + // created by this build. + // The pod will be allowed to use secrets referenced by the ServiceAccount + ServiceAccount string `json:"serviceAccount,omitempty" protobuf:"bytes,1,opt,name=serviceAccount"` + + // source describes the SCM in use. + Source BuildSource `json:"source,omitempty" protobuf:"bytes,2,opt,name=source"` + + // revision is the information from the source for a specific repo snapshot. + // This is optional. + Revision *SourceRevision `json:"revision,omitempty" protobuf:"bytes,3,opt,name=revision"` + + // strategy defines how to perform a build. + Strategy BuildStrategy `json:"strategy" protobuf:"bytes,4,opt,name=strategy"` + + // output describes the container image the Strategy should produce. + Output BuildOutput `json:"output,omitempty" protobuf:"bytes,5,opt,name=output"` + + // resources computes resource requirements to execute the build. + Resources corev1.ResourceRequirements `json:"resources,omitempty" protobuf:"bytes,6,opt,name=resources"` + + // postCommit is a build hook executed after the build output image is + // committed, before it is pushed to a registry. 
+ PostCommit BuildPostCommitSpec `json:"postCommit,omitempty" protobuf:"bytes,7,opt,name=postCommit"` + + // completionDeadlineSeconds is an optional duration in seconds, counted from + // the time when a build pod gets scheduled in the system, that the build may + // be active on a node before the system actively tries to terminate the + // build; the value must be a positive integer + CompletionDeadlineSeconds *int64 `json:"completionDeadlineSeconds,omitempty" protobuf:"varint,8,opt,name=completionDeadlineSeconds"` + + // nodeSelector is a selector which must be true for the build pod to fit on a node + // If nil, it can be overridden by default build nodeselector values for the cluster. + // If set to an empty map or a map with any values, default build nodeselector values + // are ignored. + // +optional + NodeSelector OptionalNodeSelector `json:"nodeSelector" protobuf:"bytes,9,name=nodeSelector"` +} + +// BuildTriggerCause holds information about a triggered build. It is used for +// displaying build trigger data for each build and build configuration in oc +// describe. It is also used to describe which triggers led to the most recent +// update in the build configuration. +type BuildTriggerCause struct { + // message is used to store a human readable message for why the build was + // triggered. E.g.: "Manually triggered by user", "Configuration change", etc. + Message string `json:"message,omitempty" protobuf:"bytes,1,opt,name=message"` + + // genericWebHook holds data about a build's generic webhook trigger. + GenericWebHook *GenericWebHookCause `json:"genericWebHook,omitempty" protobuf:"bytes,2,opt,name=genericWebHook"` + + // gitHubWebHook represents data for a GitHub webhook that fired a + // specific build. + GitHubWebHook *GitHubWebHookCause `json:"githubWebHook,omitempty" protobuf:"bytes,3,opt,name=githubWebHook"` + + // imageChangeBuild stores information about an imagechange event + // that triggered a new build. + ImageChangeBuild *ImageChangeCause `json:"imageChangeBuild,omitempty" protobuf:"bytes,4,opt,name=imageChangeBuild"` + + // GitLabWebHook represents data for a GitLab webhook that fired a specific + // build. + GitLabWebHook *GitLabWebHookCause `json:"gitlabWebHook,omitempty" protobuf:"bytes,5,opt,name=gitlabWebHook"` + + // BitbucketWebHook represents data for a Bitbucket webhook that fired a + // specific build. + BitbucketWebHook *BitbucketWebHookCause `json:"bitbucketWebHook,omitempty" protobuf:"bytes,6,opt,name=bitbucketWebHook"` +} + +// GenericWebHookCause holds information about a generic WebHook that +// triggered a build. +type GenericWebHookCause struct { + // revision is an optional field that stores the git source revision + // information of the generic webhook trigger when it is available. + Revision *SourceRevision `json:"revision,omitempty" protobuf:"bytes,1,opt,name=revision"` + + // secret is the obfuscated webhook secret that triggered a build. + Secret string `json:"secret,omitempty" protobuf:"bytes,2,opt,name=secret"` +} + +// GitHubWebHookCause has information about a GitHub webhook that triggered a +// build. +type GitHubWebHookCause struct { + // revision is the git revision information of the trigger. + Revision *SourceRevision `json:"revision,omitempty" protobuf:"bytes,1,opt,name=revision"` + + // secret is the obfuscated webhook secret that triggered a build.
+ Secret string `json:"secret,omitempty" protobuf:"bytes,2,opt,name=secret"` +} + +// CommonWebHookCause factors out the identical format of these webhook +// causes into a struct so we can share it in the specific causes; it is too late for +// GitHub and Generic but we can leverage this pattern with GitLab and Bitbucket. +type CommonWebHookCause struct { + // Revision is the git source revision information of the trigger. + Revision *SourceRevision `json:"revision,omitempty" protobuf:"bytes,1,opt,name=revision"` + + // Secret is the obfuscated webhook secret that triggered a build. + Secret string `json:"secret,omitempty" protobuf:"bytes,2,opt,name=secret"` +} + +// GitLabWebHookCause has information about a GitLab webhook that triggered a +// build. +type GitLabWebHookCause struct { + CommonWebHookCause `json:",inline" protobuf:"bytes,1,opt,name=commonSpec"` +} + +// BitbucketWebHookCause has information about a Bitbucket webhook that triggered a +// build. +type BitbucketWebHookCause struct { + CommonWebHookCause `json:",inline" protobuf:"bytes,1,opt,name=commonSpec"` +} + +// ImageChangeCause contains information about the image that triggered a +// build +type ImageChangeCause struct { + // imageID is the ID of the image that triggered a new build. + ImageID string `json:"imageID,omitempty" protobuf:"bytes,1,opt,name=imageID"` + + // fromRef contains detailed information about an image that triggered a + // build. + FromRef *corev1.ObjectReference `json:"fromRef,omitempty" protobuf:"bytes,2,opt,name=fromRef"` +} + +// BuildStatus contains the status of a build +type BuildStatus struct { + // phase is the point in the build lifecycle. Possible values are + // "New", "Pending", "Running", "Complete", "Failed", "Error", and "Cancelled". + Phase BuildPhase `json:"phase" protobuf:"bytes,1,opt,name=phase,casttype=BuildPhase"` + + // cancelled describes if a cancel event was triggered for the build. + Cancelled bool `json:"cancelled,omitempty" protobuf:"varint,2,opt,name=cancelled"` + + // reason is a brief CamelCase string that describes any failure and is meant for machine parsing and tidy display in the CLI. + Reason StatusReason `json:"reason,omitempty" protobuf:"bytes,3,opt,name=reason,casttype=StatusReason"` + + // message is a human-readable message indicating details about why the build has this status. + Message string `json:"message,omitempty" protobuf:"bytes,4,opt,name=message"` + + // startTimestamp is a timestamp representing the server time when this Build started + // running in a Pod. + // It is represented in RFC3339 form and is in UTC. + StartTimestamp *metav1.Time `json:"startTimestamp,omitempty" protobuf:"bytes,5,opt,name=startTimestamp"` + + // completionTimestamp is a timestamp representing the server time when this Build was + // finished, whether that build failed or succeeded. It reflects the time at which + // the Pod running the Build terminated. + // It is represented in RFC3339 form and is in UTC. + CompletionTimestamp *metav1.Time `json:"completionTimestamp,omitempty" protobuf:"bytes,6,opt,name=completionTimestamp"` + + // duration contains time.Duration object describing build time. + Duration time.Duration `json:"duration,omitempty" protobuf:"varint,7,opt,name=duration,casttype=time.Duration"` + + // outputDockerImageReference contains a reference to the container image that + // will be built by this build. Its value is computed from + // Build.Spec.Output.To, and should include the registry address, so that + // it can be used to push and pull the image.
+ OutputDockerImageReference string `json:"outputDockerImageReference,omitempty" protobuf:"bytes,8,opt,name=outputDockerImageReference"` + + // config is an ObjectReference to the BuildConfig this Build is based on. + Config *corev1.ObjectReference `json:"config,omitempty" protobuf:"bytes,9,opt,name=config"` + + // output describes the container image the build has produced. + Output BuildStatusOutput `json:"output,omitempty" protobuf:"bytes,10,opt,name=output"` + + // stages contains details about each stage that occurs during the build + // including start time, duration (in milliseconds), and the steps that + // occurred within each stage. + Stages []StageInfo `json:"stages,omitempty" protobuf:"bytes,11,opt,name=stages"` + + // logSnippet is the last few lines of the build log. This value is only set for builds that failed. + LogSnippet string `json:"logSnippet,omitempty" protobuf:"bytes,12,opt,name=logSnippet"` + + // Conditions represents the latest available observations of a build's current state. + // +patchMergeKey=type + // +patchStrategy=merge + Conditions []BuildCondition `json:"conditions,omitempty" patchStrategy:"merge" patchMergeKey:"type" protobuf:"bytes,13,rep,name=conditions"` +} + +// StageInfo contains details about a build stage. +type StageInfo struct { + // name is a unique identifier for each build stage that occurs. + Name StageName `json:"name,omitempty" protobuf:"bytes,1,opt,name=name"` + + // startTime is a timestamp representing the server time when this Stage started. + // It is represented in RFC3339 form and is in UTC. + StartTime metav1.Time `json:"startTime,omitempty" protobuf:"bytes,2,opt,name=startTime"` + + // durationMilliseconds identifies how long the stage took + // to complete in milliseconds. + // Note: the duration of a stage can exceed the sum of the duration of the steps within + // the stage as not all actions are accounted for in explicit build steps. + DurationMilliseconds int64 `json:"durationMilliseconds,omitempty" protobuf:"varint,3,opt,name=durationMilliseconds"` + + // steps contains details about each step that occurs during a build stage + // including start time and duration in milliseconds. + Steps []StepInfo `json:"steps,omitempty" protobuf:"bytes,4,opt,name=steps"` +} + +// StageName is the unique identifier for each build stage. +type StageName string + +// Valid values for StageName +const ( + // StageFetchInputs fetches any inputs such as source code. + StageFetchInputs StageName = "FetchInputs" + + // StagePullImages pulls any images that are needed such as + // base images or input images. + StagePullImages StageName = "PullImages" + + // StageBuild performs the steps necessary to build the image. + StageBuild StageName = "Build" + + // StagePostCommit executes any post commit steps. + StagePostCommit StageName = "PostCommit" + + // StagePushImage pushes the image to the node. + StagePushImage StageName = "PushImage" +)
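Since every stage records its own `DurationMilliseconds`, aggregating build time is a simple fold over `BuildStatus.Stages`. A sketch using only the types and constants defined above (the sample durations are made up):

```go
package main

import (
	"fmt"
	"time"

	buildv1 "github.com/openshift/api/build/v1"
)

// totalStageTime sums the recorded duration of every build stage.
// Per the doc comment above, a stage may take longer than the sum of its
// steps, so this is the stage-level total, not a step-level one.
func totalStageTime(status buildv1.BuildStatus) time.Duration {
	var ms int64
	for _, s := range status.Stages {
		ms += s.DurationMilliseconds
	}
	return time.Duration(ms) * time.Millisecond
}

func main() {
	st := buildv1.BuildStatus{Stages: []buildv1.StageInfo{
		{Name: buildv1.StageFetchInputs, DurationMilliseconds: 1200},
		{Name: buildv1.StageBuild, DurationMilliseconds: 8300},
	}}
	fmt.Println(totalStageTime(st)) // 9.5s
}
```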
+// StepInfo contains details about a build step. +type StepInfo struct { + // name is a unique identifier for each build step. + Name StepName `json:"name,omitempty" protobuf:"bytes,1,opt,name=name"` + + // startTime is a timestamp representing the server time when this Step started. + // It is represented in RFC3339 form and is in UTC. + StartTime metav1.Time `json:"startTime,omitempty" protobuf:"bytes,2,opt,name=startTime"` + + // durationMilliseconds identifies how long the step took + // to complete in milliseconds. + DurationMilliseconds int64 `json:"durationMilliseconds,omitempty" protobuf:"varint,3,opt,name=durationMilliseconds"` +} + +// StepName is a unique identifier for each build step. +type StepName string + +// Valid values for StepName +const ( + // StepExecPostCommitHook executes the build config's post commit hook. + StepExecPostCommitHook StepName = "RunPostCommitHook" + + // StepFetchGitSource fetches source code for the build. + StepFetchGitSource StepName = "FetchGitSource" + + // StepPullBaseImage pulls a base image for the build. + StepPullBaseImage StepName = "PullBaseImage" + + // StepPullInputImage pulls an input image for the build. + StepPullInputImage StepName = "PullInputImage" + + // StepPushImage pushes an image to the registry. + StepPushImage StepName = "PushImage" + + // StepPushDockerImage pushes a container image to the registry. + StepPushDockerImage StepName = "PushDockerImage" + + // StepDockerBuild performs the container image build. + StepDockerBuild StepName = "DockerBuild" +) + +// BuildPhase represents the status of a build at a point in time. +type BuildPhase string + +// Valid values for BuildPhase. +const ( + // BuildPhaseNew is automatically assigned to a newly created build. + BuildPhaseNew BuildPhase = "New" + + // BuildPhasePending indicates that a pod name has been assigned and a build is + // about to start running. + BuildPhasePending BuildPhase = "Pending" + + // BuildPhaseRunning indicates that a pod has been created and a build is running. + BuildPhaseRunning BuildPhase = "Running" + + // BuildPhaseComplete indicates that a build has been successful. + BuildPhaseComplete BuildPhase = "Complete" + + // BuildPhaseFailed indicates that a build has executed and failed. + BuildPhaseFailed BuildPhase = "Failed" + + // BuildPhaseError indicates that an error prevented the build from executing. + BuildPhaseError BuildPhase = "Error" + + // BuildPhaseCancelled indicates that a running/pending build was stopped from executing. + BuildPhaseCancelled BuildPhase = "Cancelled" +) + +type BuildConditionType string + +// BuildCondition describes the state of a build at a certain point. +type BuildCondition struct { + // Type of build condition. + Type BuildConditionType `json:"type" protobuf:"bytes,1,opt,name=type,casttype=BuildConditionType"` + // Status of the condition, one of True, False, Unknown. + Status corev1.ConditionStatus `json:"status" protobuf:"bytes,2,opt,name=status,casttype=k8s.io/kubernetes/pkg/api/v1.ConditionStatus"` + // The last time this condition was updated. + LastUpdateTime metav1.Time `json:"lastUpdateTime,omitempty" protobuf:"bytes,6,opt,name=lastUpdateTime"` + // The last time the condition transitioned from one status to another. + LastTransitionTime metav1.Time `json:"lastTransitionTime,omitempty" protobuf:"bytes,3,opt,name=lastTransitionTime"` + // The reason for the condition's last transition. + Reason string `json:"reason,omitempty" protobuf:"bytes,4,opt,name=reason"` + // A human readable message indicating details about the transition. + Message string `json:"message,omitempty" protobuf:"bytes,5,opt,name=message"` +} + +// StatusReason is a brief CamelCase string that describes a temporary or +// permanent build error condition, meant for machine parsing and tidy display +// in the CLI. +type StatusReason string
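The phase constants above partition into transient and terminal states; a small helper makes the boundary explicit, using only names defined in this file:

```go
package main

import (
	"fmt"

	buildv1 "github.com/openshift/api/build/v1"
)

// isTerminal reports whether a build has reached a final phase
// (Complete, Failed, Error, or Cancelled).
func isTerminal(p buildv1.BuildPhase) bool {
	switch p {
	case buildv1.BuildPhaseComplete, buildv1.BuildPhaseFailed,
		buildv1.BuildPhaseError, buildv1.BuildPhaseCancelled:
		return true
	}
	return false
}

func main() {
	fmt.Println(isTerminal(buildv1.BuildPhaseRunning))  // false
	fmt.Println(isTerminal(buildv1.BuildPhaseComplete)) // true
}
```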
+ To *BuildStatusOutputTo `json:"to,omitempty" protobuf:"bytes,1,opt,name=to"`
+}
+
+// BuildStatusOutputTo describes the status of the built image with regard to the
+// image registry to which it was supposed to be pushed.
+type BuildStatusOutputTo struct {
+ // imageDigest is the digest of the built container image. The digest uniquely
+ // identifies the image in the registry to which it was pushed.
+ //
+ // Please note that this field may not always be set even if the push
+ // completes successfully - e.g. when the registry returns no digest or
+ // returns it in a format that the builder doesn't understand.
+ ImageDigest string `json:"imageDigest,omitempty" protobuf:"bytes,1,opt,name=imageDigest"`
+}
+
+// BuildSourceType is the type of SCM used.
+type BuildSourceType string
+
+// Valid values for BuildSourceType.
+const (
+ // BuildSourceGit instructs a build to use a Git source control repository as the build input.
+ BuildSourceGit BuildSourceType = "Git"
+ // BuildSourceDockerfile uses a Dockerfile as the start of a build.
+ BuildSourceDockerfile BuildSourceType = "Dockerfile"
+ // BuildSourceBinary indicates the build will accept a Binary file as input.
+ BuildSourceBinary BuildSourceType = "Binary"
+ // BuildSourceImage indicates the build will accept an image as input.
+ BuildSourceImage BuildSourceType = "Image"
+ // BuildSourceNone indicates the build has no predefined input (only valid for Source and Custom Strategies).
+ BuildSourceNone BuildSourceType = "None"
+)
+
+// BuildSource is the SCM used for the build.
+type BuildSource struct {
+ // type of build input to accept.
+ // +k8s:conversion-gen=false
+ Type BuildSourceType `json:"type" protobuf:"bytes,1,opt,name=type,casttype=BuildSourceType"`
+
+ // binary builds accept a binary as their input. The binary is generally assumed to be a tar,
+ // gzipped tar, or zip file depending on the strategy. For container image builds, this is the build
+ // context and an optional Dockerfile may be specified to override any Dockerfile in the
+ // build context. For Source builds, this is assumed to be an archive as described above. For
+ // Source and container image builds, if binary.asFile is set the build will receive a directory with
+ // a single file. contextDir may be used when an archive is provided. Custom builds will
+ // receive this binary as input on STDIN.
+ Binary *BinaryBuildSource `json:"binary,omitempty" protobuf:"bytes,2,opt,name=binary"`
+
+ // dockerfile is the raw contents of a Dockerfile which should be built. When this option is
+ // specified, the FROM may be modified based on your strategy base image and additional ENV
+ // stanzas from your strategy environment will be added after the FROM, but before the rest
+ // of your Dockerfile stanzas. The Dockerfile source type may be used with other options like
+ // git - in those cases the Git repo will have any innate Dockerfile replaced in the context
+ // dir.
+ Dockerfile *string `json:"dockerfile,omitempty" protobuf:"bytes,3,opt,name=dockerfile"`
+
+ // git contains optional information about git build source.
+ Git *GitBuildSource `json:"git,omitempty" protobuf:"bytes,4,opt,name=git"`
+
+ // images describes a set of images to be used to provide source for the build.
+ Images []ImageSource `json:"images,omitempty" protobuf:"bytes,5,rep,name=images"`
+
+ // contextDir specifies the sub-directory where the source code for the application exists.
+ // This allows buildable sources to live in a directory other than the root of the
+ // repository.
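+ //
+ // Illustrative value, assumed for the example: a repository that keeps the
+ // application under a subdirectory might set
+ //
+ // ContextDir: "app/frontend"
+ //
+ // so the build runs against that directory instead of the repository root.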
+ ContextDir string `json:"contextDir,omitempty" protobuf:"bytes,6,opt,name=contextDir"`
+
+ // sourceSecret is the name of a Secret that would be used for setting
+ // up the authentication for cloning a private repository.
+ // The secret contains valid credentials for the remote repository, where the
+ // data's key represents the authentication method to be used and the value is
+ // the base64 encoded credentials. Supported auth methods are: ssh-privatekey.
+ SourceSecret *corev1.LocalObjectReference `json:"sourceSecret,omitempty" protobuf:"bytes,7,opt,name=sourceSecret"`
+
+ // secrets represents a list of secrets and their destinations that will
+ // be used only for the build.
+ Secrets []SecretBuildSource `json:"secrets,omitempty" protobuf:"bytes,8,rep,name=secrets"`
+
+ // configMaps represents a list of configMaps and their destinations that will
+ // be used for the build.
+ ConfigMaps []ConfigMapBuildSource `json:"configMaps,omitempty" protobuf:"bytes,9,rep,name=configMaps"`
+}
+
+// ImageSource is used to describe build source that will be extracted from an image or used during a
+// multi-stage build. A reference of type ImageStreamTag, ImageStreamImage or DockerImage may be used.
+// A pull secret can be specified to pull the image from an external registry or override the default
+// service account secret if pulling from the internal registry. Image sources can either be used to
+// extract content from an image and place it into the build context along with the repository source,
+// or used directly during a multi-stage container image build to allow content to be copied without overwriting
+// the contents of the repository source (see the 'paths' and 'as' fields).
+type ImageSource struct {
+ // from is a reference to an ImageStreamTag, ImageStreamImage, or DockerImage to
+ // copy source from.
+ From corev1.ObjectReference `json:"from" protobuf:"bytes,1,opt,name=from"`
+
+ // A list of image names that this source will be used in place of during a multi-stage container image
+ // build. For instance, a Dockerfile that uses "COPY --from=nginx:latest" will first check for an image
+ // source that has "nginx:latest" in this field before attempting to pull directly. If the Dockerfile
+ // does not reference an image source, it is ignored. This field and paths may both be set, in which case
+ // the contents will be used twice.
+ // +optional
+ As []string `json:"as" protobuf:"bytes,4,rep,name=as"`
+
+ // paths is a list of source and destination paths to copy from the image. This content will be copied
+ // into the build context prior to starting the build. If no paths are set, the build context will
+ // not be altered.
+ // +optional
+ Paths []ImageSourcePath `json:"paths" protobuf:"bytes,2,rep,name=paths"`
+
+ // pullSecret is a reference to a secret to be used to pull the image from a registry.
+ // If the image is pulled from the OpenShift registry, this field does not need to be set.
+ PullSecret *corev1.LocalObjectReference `json:"pullSecret,omitempty" protobuf:"bytes,3,opt,name=pullSecret"`
+}
+
+// ImageSourcePath describes a path to be copied from a source image and its destination within the build directory.
+type ImageSourcePath struct {
+ // sourcePath is the absolute path of the file or directory inside the image to
+ // copy to the build directory. If the source path ends in /. then the content of
+ // the directory will be copied, but the directory itself will not be created at the
+ // destination.
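+ //
+ // Illustrative values, assumed for the example: copying the contents of a
+ // directory out of a source image into an `app/` subdirectory of the build
+ // context:
+ //
+ // ImageSourcePath{
+ // SourcePath: "/opt/app-root/lib/.",
+ // DestinationDir: "app",
+ // }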
+ SourcePath string `json:"sourcePath" protobuf:"bytes,1,opt,name=sourcePath"`
+
+ // destinationDir is the relative directory within the build directory
+ // where files copied from the image are placed.
+ DestinationDir string `json:"destinationDir" protobuf:"bytes,2,opt,name=destinationDir"`
+}
+
+// SecretBuildSource describes a secret and its destination directory that will be
+// used only at build time. The content of the secret referenced here will
+// be copied into the destination directory instead of mounting.
+type SecretBuildSource struct {
+ // secret is a reference to an existing secret that you want to use in your
+ // build.
+ Secret corev1.LocalObjectReference `json:"secret" protobuf:"bytes,1,opt,name=secret"`
+
+ // destinationDir is the directory where the files from the secret should be
+ // available at build time.
+ // For the Source build strategy, these will be injected into a container
+ // where the assemble script runs. Later, when the script finishes, all files
+ // injected will be truncated to zero length.
+ // For the container image build strategy, these will be copied into the build
+ // directory, where the Dockerfile is located, so users can ADD or COPY them
+ // during the container image build.
+ DestinationDir string `json:"destinationDir,omitempty" protobuf:"bytes,2,opt,name=destinationDir"`
+}
+
+// ConfigMapBuildSource describes a configmap and its destination directory that will be
+// used only at build time. The content of the configmap referenced here will
+// be copied into the destination directory instead of mounting.
+type ConfigMapBuildSource struct {
+ // configMap is a reference to an existing configmap that you want to use in your
+ // build.
+ ConfigMap corev1.LocalObjectReference `json:"configMap" protobuf:"bytes,1,opt,name=configMap"`
+
+ // destinationDir is the directory where the files from the configmap should be
+ // available at build time.
+ // For the Source build strategy, these will be injected into a container
+ // where the assemble script runs.
+ // For the container image build strategy, these will be copied into the build
+ // directory, where the Dockerfile is located, so users can ADD or COPY them
+ // during the container image build.
+ DestinationDir string `json:"destinationDir,omitempty" protobuf:"bytes,2,opt,name=destinationDir"`
+}
+
+// BinaryBuildSource describes a binary file to be used for the Docker and Source build strategies,
+// where the file will be extracted and used as the build source.
+type BinaryBuildSource struct {
+ // asFile indicates that the provided binary input should be considered a single file
+ // within the build input. For example, specifying "webapp.war" would place the provided
+ // binary as `/webapp.war` for the builder. If left empty, the Docker and Source build
+ // strategies assume this file is a zip, tar, or tar.gz file and extract it as the source.
+ // The custom strategy receives this binary as standard input. This filename may not
+ // contain slashes or be '..' or '.'.
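+ //
+ // Illustrative usage, assuming the standard oc CLI: a binary build started
+ // with
+ //
+ // oc start-build myapp --from-file=webapp.war
+ //
+ // would surface here as AsFile: "webapp.war" on the created build.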
+ AsFile string `json:"asFile,omitempty" protobuf:"bytes,1,opt,name=asFile"`
+}
+
+// SourceRevision is the revision or commit information from the source for the build
+type SourceRevision struct {
+ // type of the build source, may be one of 'Git', 'Dockerfile', 'Binary', 'Image', or 'None'
+ // +k8s:conversion-gen=false
+ Type BuildSourceType `json:"type" protobuf:"bytes,1,opt,name=type,casttype=BuildSourceType"`
+
+ // Git contains information about git-based build source.
+ Git *GitSourceRevision `json:"git,omitempty" protobuf:"bytes,2,opt,name=git"`
+}
+
+// GitSourceRevision is the commit information from a git source for a build
+type GitSourceRevision struct {
+ // commit is the commit hash identifying a specific commit
+ Commit string `json:"commit,omitempty" protobuf:"bytes,1,opt,name=commit"`
+
+ // author is the author of a specific commit
+ Author SourceControlUser `json:"author,omitempty" protobuf:"bytes,2,opt,name=author"`
+
+ // committer is the committer of a specific commit
+ Committer SourceControlUser `json:"committer,omitempty" protobuf:"bytes,3,opt,name=committer"`
+
+ // message is the description of a specific commit
+ Message string `json:"message,omitempty" protobuf:"bytes,4,opt,name=message"`
+}
+
+// ProxyConfig defines what proxies to use for an operation
+type ProxyConfig struct {
+ // httpProxy is a proxy used to reach the git repository over http
+ HTTPProxy *string `json:"httpProxy,omitempty" protobuf:"bytes,3,opt,name=httpProxy"`
+
+ // httpsProxy is a proxy used to reach the git repository over https
+ HTTPSProxy *string `json:"httpsProxy,omitempty" protobuf:"bytes,4,opt,name=httpsProxy"`
+
+ // noProxy is the list of domains for which the proxy should not be used
+ NoProxy *string `json:"noProxy,omitempty" protobuf:"bytes,5,opt,name=noProxy"`
+}
+
+// GitBuildSource defines the parameters of a Git SCM
+type GitBuildSource struct {
+ // uri points to the source that will be built. The structure of the source
+ // will depend on the type of build to run.
+ URI string `json:"uri" protobuf:"bytes,1,opt,name=uri"`
+
+ // ref is the branch/tag/ref to build.
+ Ref string `json:"ref,omitempty" protobuf:"bytes,2,opt,name=ref"`
+
+ // proxyConfig defines the proxies to use for the git clone operation. Values
+ // not set here are inherited from cluster-wide build git proxy settings.
+ ProxyConfig `json:",inline" protobuf:"bytes,3,opt,name=proxyConfig"`
+}
+
+// SourceControlUser defines the identity of a user of source control
+type SourceControlUser struct {
+ // name of the source control user
+ Name string `json:"name,omitempty" protobuf:"bytes,1,opt,name=name"`
+
+ // email of the source control user
+ Email string `json:"email,omitempty" protobuf:"bytes,2,opt,name=email"`
+}
+
+// BuildStrategy contains the details of how to perform a build.
+type BuildStrategy struct {
+ // type is the kind of build strategy.
+ // +k8s:conversion-gen=false
+ Type BuildStrategyType `json:"type" protobuf:"bytes,1,opt,name=type,casttype=BuildStrategyType"`
+
+ // dockerStrategy holds the parameters to the container image build strategy.
+ DockerStrategy *DockerBuildStrategy `json:"dockerStrategy,omitempty" protobuf:"bytes,2,opt,name=dockerStrategy"`
+
+ // sourceStrategy holds the parameters to the Source build strategy.
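+ //
+ // Illustrative sketch, not prescribed by the API (the image name is an
+ // assumed example): a Source strategy building with a builder image stream
+ // tag:
+ //
+ // strategy := BuildStrategy{
+ // Type: SourceBuildStrategyType,
+ // SourceStrategy: &SourceBuildStrategy{
+ // From: corev1.ObjectReference{Kind: "ImageStreamTag", Name: "ruby:latest"},
+ // },
+ // }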
+ SourceStrategy *SourceBuildStrategy `json:"sourceStrategy,omitempty" protobuf:"bytes,3,opt,name=sourceStrategy"`
+
+ // customStrategy holds the parameters to the Custom build strategy
+ CustomStrategy *CustomBuildStrategy `json:"customStrategy,omitempty" protobuf:"bytes,4,opt,name=customStrategy"`
+
+ // JenkinsPipelineStrategy holds the parameters to the Jenkins Pipeline build strategy.
+ // Deprecated: use OpenShift Pipelines
+ JenkinsPipelineStrategy *JenkinsPipelineBuildStrategy `json:"jenkinsPipelineStrategy,omitempty" protobuf:"bytes,5,opt,name=jenkinsPipelineStrategy"`
+}
+
+// BuildStrategyType describes a particular way of performing a build.
+type BuildStrategyType string
+
+// Valid values for BuildStrategyType.
+const (
+ // DockerBuildStrategyType performs builds using a Dockerfile.
+ DockerBuildStrategyType BuildStrategyType = "Docker"
+
+ // SourceBuildStrategyType performs builds using Source-to-Image with a Git repository
+ // and a builder image.
+ SourceBuildStrategyType BuildStrategyType = "Source"
+
+ // CustomBuildStrategyType performs builds using a custom builder container image.
+ CustomBuildStrategyType BuildStrategyType = "Custom"
+
+ // JenkinsPipelineBuildStrategyType indicates the build will run via Jenkins Pipeline.
+ JenkinsPipelineBuildStrategyType BuildStrategyType = "JenkinsPipeline"
+)
+
+// CustomBuildStrategy defines input parameters specific to Custom build.
+type CustomBuildStrategy struct {
+ // from is a reference to a DockerImage, ImageStreamTag, or ImageStreamImage from which
+ // the container image should be pulled
+ From corev1.ObjectReference `json:"from" protobuf:"bytes,1,opt,name=from"`
+
+ // pullSecret is the name of a Secret that would be used for setting up
+ // the authentication for pulling the container images from the private Docker
+ // registries
+ PullSecret *corev1.LocalObjectReference `json:"pullSecret,omitempty" protobuf:"bytes,2,opt,name=pullSecret"`
+
+ // env contains additional environment variables you want to pass into a builder container.
+ Env []corev1.EnvVar `json:"env,omitempty" protobuf:"bytes,3,rep,name=env"`
+
+ // exposeDockerSocket will allow running Docker commands (and build container images) from
+ // inside the container.
+ // TODO: Allow admins to enforce 'false' for this option
+ ExposeDockerSocket bool `json:"exposeDockerSocket,omitempty" protobuf:"varint,4,opt,name=exposeDockerSocket"`
+
+ // forcePull describes if the controller should configure the build pod to always pull the images
+ // for the builder or only pull if it is not present locally
+ ForcePull bool `json:"forcePull,omitempty" protobuf:"varint,5,opt,name=forcePull"`
+
+ // secrets is a list of additional secrets that will be included in the build pod
+ Secrets []SecretSpec `json:"secrets,omitempty" protobuf:"bytes,6,rep,name=secrets"`
+
+ // buildAPIVersion is the requested API version for the Build object serialized and passed to the custom builder
+ BuildAPIVersion string `json:"buildAPIVersion,omitempty" protobuf:"bytes,7,opt,name=buildAPIVersion"`
+}
+
+// ImageOptimizationPolicy describes what optimizations the builder can perform when building images.
+type ImageOptimizationPolicy string
+
+const (
+ // ImageOptimizationNone will generate a canonical container image as produced by the
+ // `container image build` command.
+ ImageOptimizationNone ImageOptimizationPolicy = "None"
+
+ // ImageOptimizationSkipLayers is an experimental policy and will avoid creating
+ // unique layers for each dockerfile line, resulting in smaller images and saving time
+ // during creation. Some Dockerfile syntax is not fully supported - content added to
+ // a VOLUME by an earlier layer may have incorrect uid, gid, and filesystem permissions.
+ // If an unsupported setting is detected, the build will fail.
+ ImageOptimizationSkipLayers ImageOptimizationPolicy = "SkipLayers"
+
+ // ImageOptimizationSkipLayersAndWarn is the same as SkipLayers, but will only
+ // warn to the build output instead of failing when unsupported syntax is detected. This
+ // policy is experimental.
+ ImageOptimizationSkipLayersAndWarn ImageOptimizationPolicy = "SkipLayersAndWarn"
+)
+
+// DockerBuildStrategy defines input parameters specific to container image build.
+type DockerBuildStrategy struct {
+ // from is a reference to a DockerImage, ImageStreamTag, or ImageStreamImage which overrides
+ // the FROM image in the Dockerfile for the build. If the Dockerfile uses multi-stage builds,
+ // this will replace the image in the last FROM directive of the file.
+ From *corev1.ObjectReference `json:"from,omitempty" protobuf:"bytes,1,opt,name=from"`
+
+ // pullSecret is the name of a Secret that would be used for setting up
+ // the authentication for pulling the container images from the private Docker
+ // registries
+ PullSecret *corev1.LocalObjectReference `json:"pullSecret,omitempty" protobuf:"bytes,2,opt,name=pullSecret"`
+
+ // noCache if set to true indicates that the container image build must be executed with the
+ // --no-cache=true flag
+ NoCache bool `json:"noCache,omitempty" protobuf:"varint,3,opt,name=noCache"`
+
+ // env contains additional environment variables you want to pass into a builder container.
+ Env []corev1.EnvVar `json:"env,omitempty" protobuf:"bytes,4,rep,name=env"`
+
+ // forcePull describes if the builder should pull the images from registry prior to building.
+ ForcePull bool `json:"forcePull,omitempty" protobuf:"varint,5,opt,name=forcePull"`
+
+ // dockerfilePath is the path of the Dockerfile that will be used to build the container image,
+ // relative to the root of the context (contextDir).
+ DockerfilePath string `json:"dockerfilePath,omitempty" protobuf:"bytes,6,opt,name=dockerfilePath"`
+
+ // buildArgs contains build arguments that will be resolved in the Dockerfile. See
+ // https://docs.docker.com/engine/reference/builder/#/arg for more details.
+ BuildArgs []corev1.EnvVar `json:"buildArgs,omitempty" protobuf:"bytes,7,rep,name=buildArgs"`
+
+ // imageOptimizationPolicy describes what optimizations the system can use when building images
+ // to reduce the final size or time spent building the image. The default policy is 'None' which
+ // means the final build image will be equivalent to an image created by the container image build API.
+ // The experimental policy 'SkipLayers' will avoid committing new layers in between each
+ // image step, and will fail if the Dockerfile cannot provide compatibility with the 'None'
+ // policy. An additional experimental policy 'SkipLayersAndWarn' is the same as
+ // 'SkipLayers' but simply warns if compatibility cannot be preserved.
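+ //
+ // Illustrative sketch, not part of the API (variable names are assumed):
+ //
+ // policy := ImageOptimizationSkipLayers
+ // strategy := DockerBuildStrategy{ImageOptimizationPolicy: &policy}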
+ ImageOptimizationPolicy *ImageOptimizationPolicy `json:"imageOptimizationPolicy,omitempty" protobuf:"bytes,8,opt,name=imageOptimizationPolicy,casttype=ImageOptimizationPolicy"`
+}
+
+// SourceBuildStrategy defines input parameters specific to a Source build.
+type SourceBuildStrategy struct {
+ // from is a reference to a DockerImage, ImageStreamTag, or ImageStreamImage from which
+ // the container image should be pulled
+ From corev1.ObjectReference `json:"from" protobuf:"bytes,1,opt,name=from"`
+
+ // pullSecret is the name of a Secret that would be used for setting up
+ // the authentication for pulling the container images from the private Docker
+ // registries
+ PullSecret *corev1.LocalObjectReference `json:"pullSecret,omitempty" protobuf:"bytes,2,opt,name=pullSecret"`
+
+ // env contains additional environment variables you want to pass into a builder container.
+ Env []corev1.EnvVar `json:"env,omitempty" protobuf:"bytes,3,rep,name=env"`
+
+ // scripts is the location of Source scripts
+ Scripts string `json:"scripts,omitempty" protobuf:"bytes,4,opt,name=scripts"`
+
+ // incremental flag forces the Source build to do incremental builds if true.
+ Incremental *bool `json:"incremental,omitempty" protobuf:"varint,5,opt,name=incremental"`
+
+ // forcePull describes if the builder should pull the images from registry prior to building.
+ ForcePull bool `json:"forcePull,omitempty" protobuf:"varint,6,opt,name=forcePull"`
+
+ // deprecated json field, do not reuse: runtimeImage
+ // +k8s:protobuf-deprecated=runtimeImage,7
+
+ // deprecated json field, do not reuse: runtimeArtifacts
+ // +k8s:protobuf-deprecated=runtimeArtifacts,8
+
+}
+
+// JenkinsPipelineBuildStrategy holds parameters specific to a Jenkins Pipeline build.
+// Deprecated: use OpenShift Pipelines
+type JenkinsPipelineBuildStrategy struct {
+ // JenkinsfilePath is the optional path of the Jenkinsfile that will be used to configure the pipeline
+ // relative to the root of the context (contextDir). If neither JenkinsfilePath nor Jenkinsfile is
+ // specified, this defaults to Jenkinsfile in the root of the specified contextDir.
+ JenkinsfilePath string `json:"jenkinsfilePath,omitempty" protobuf:"bytes,1,opt,name=jenkinsfilePath"`
+
+ // Jenkinsfile defines the optional raw contents of a Jenkinsfile which defines a Jenkins pipeline build.
+ Jenkinsfile string `json:"jenkinsfile,omitempty" protobuf:"bytes,2,opt,name=jenkinsfile"`
+
+ // env contains additional environment variables you want to pass into a build pipeline.
+ Env []corev1.EnvVar `json:"env,omitempty" protobuf:"bytes,3,rep,name=env"`
+}
+
+// A BuildPostCommitSpec holds a build post commit hook specification. The hook
+// executes a command in a temporary container running the build output image,
+// immediately after the last layer of the image is committed and before the
+// image is pushed to a registry. The command is executed with the current
+// working directory ($PWD) set to the image's WORKDIR.
+//
+// The build will be marked as failed if the hook execution fails. It will fail
+// if the script or command returns a non-zero exit code, or if there is any
+// other error related to starting the temporary container.
+//
+// There are five different ways to configure the hook. As an example, all forms
+// below are equivalent and will execute `rake test --verbose`.
+//
+// 1. Shell script:
+//
+// "postCommit": {
+// "script": "rake test --verbose"
+// }
+//
+// The above is a convenient form which is equivalent to:
+//
+// "postCommit": {
+// "command": ["/bin/sh", "-ic"],
+// "args": ["rake test --verbose"]
+// }
+//
+// 2. A command as the image entrypoint:
+//
+// "postCommit": {
+// "command": ["rake", "test", "--verbose"]
+// }
+//
+// Command overrides the image entrypoint in the exec form, as documented in
+// Docker: https://docs.docker.com/engine/reference/builder/#entrypoint.
+//
+// 3. Pass arguments to the default entrypoint:
+//
+// "postCommit": {
+// "args": ["rake", "test", "--verbose"]
+// }
+//
+// This form is only useful if the image entrypoint can handle arguments.
+//
+// 4. Shell script with arguments:
+//
+// "postCommit": {
+// "script": "rake test $1",
+// "args": ["--verbose"]
+// }
+//
+// This form is useful if you need to pass arguments that would otherwise be
+// hard to quote properly in the shell script. In the script, $0 will be
+// "/bin/sh" and $1, $2, etc., are the positional arguments from Args.
+//
+// 5. Command with arguments:
+//
+// "postCommit": {
+// "command": ["rake", "test"],
+// "args": ["--verbose"]
+// }
+//
+// This form is equivalent to appending the arguments to the Command slice.
+//
+// It is invalid to provide both Script and Command simultaneously. If none of
+// the fields are specified, the hook is not executed.
+type BuildPostCommitSpec struct {
+ // command is the command to run. It may not be specified with Script.
+ // This might be needed if the image doesn't have `/bin/sh`, or if you
+ // do not want to use a shell. In all other cases, using Script might be
+ // more convenient.
+ Command []string `json:"command,omitempty" protobuf:"bytes,1,rep,name=command"`
+ // args is a list of arguments that are provided to either Command,
+ // Script or the container image's default entrypoint. The arguments are
+ // placed immediately after the command to be run.
+ Args []string `json:"args,omitempty" protobuf:"bytes,2,rep,name=args"`
+ // script is a shell script to be run with `/bin/sh -ic`. It may not be
+ // specified with Command. Use Script when a shell script is appropriate
+ // to execute the post build hook, for example for running unit tests
+ // with `rake test`. If you need control over the image entrypoint, or
+ // if the image does not have `/bin/sh`, use Command and/or Args.
+ // The `-i` flag is needed to support CentOS and RHEL images that use
+ // Software Collections (SCL), in order to have the appropriate
+ // collections enabled in the shell. E.g., in the Ruby image, this is
+ // necessary to make `ruby`, `bundle` and other binaries available in
+ // the PATH.
+ Script string `json:"script,omitempty" protobuf:"bytes,3,opt,name=script"`
+}
+
+// BuildOutput is input to a build strategy and describes the container image that the strategy
+// should produce.
+type BuildOutput struct {
+ // to defines an optional location to push the output of this build to.
+ // Kind must be one of 'ImageStreamTag' or 'DockerImage'.
+ // This value will be used to look up a container image repository to push to.
+ // In the case of an ImageStreamTag, the ImageStreamTag will be looked for in the namespace of
+ // the build unless Namespace is specified.
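+ //
+ // Illustrative sketch, with an assumed image stream name: pushing the result
+ // to the "myapp:latest" tag in the build's namespace:
+ //
+ // output := BuildOutput{
+ // To: &corev1.ObjectReference{Kind: "ImageStreamTag", Name: "myapp:latest"},
+ // }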
+ To *corev1.ObjectReference `json:"to,omitempty" protobuf:"bytes,1,opt,name=to"`
+
+ // PushSecret is the name of a Secret that would be used for setting
+ // up the authentication for executing the Docker push to authentication
+ // enabled Docker Registry (or Docker Hub).
+ PushSecret *corev1.LocalObjectReference `json:"pushSecret,omitempty" protobuf:"bytes,2,opt,name=pushSecret"`
+
+ // imageLabels define a list of labels that are applied to the resulting image. If there
+ // are multiple labels with the same name then the last one in the list is used.
+ ImageLabels []ImageLabel `json:"imageLabels,omitempty" protobuf:"bytes,3,rep,name=imageLabels"`
+}
+
+// ImageLabel represents a label applied to the resulting image.
+type ImageLabel struct {
+ // name defines the name of the label. It must have non-zero length.
+ Name string `json:"name" protobuf:"bytes,1,opt,name=name"`
+
+ // value defines the literal value of the label.
+ Value string `json:"value,omitempty" protobuf:"bytes,2,opt,name=value"`
+}
+
+// +genclient
+// +genclient:method=Instantiate,verb=create,subresource=instantiate,input=BuildRequest,result=Build
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+
+// Build configurations define a build process for new container images. There are three types of builds possible - a container image build using a Dockerfile, a Source-to-Image build that uses a specially prepared base image that accepts source code that it can make runnable, and a custom build that can run
+// arbitrary container images as a base and accept the build parameters. Builds run on the cluster and on completion are pushed to the container image registry specified in the "output" section. A build can be triggered via a webhook, when the base image changes, or when a user manually requests a new build be
+// created.
+//
+// Each build created by a build configuration is numbered and refers back to its parent configuration. Multiple builds can be triggered at once. Builds that do not have "output" set can be used to test code or run a verification build.
+type BuildConfig struct {
+ metav1.TypeMeta `json:",inline"`
+ metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"`
+
+ // spec holds all the input necessary to produce a new build, and the conditions when
+ // to trigger them.
+ Spec BuildConfigSpec `json:"spec" protobuf:"bytes,2,opt,name=spec"`
+ // status holds any relevant information about a build config
+ // +optional
+ Status BuildConfigStatus `json:"status" protobuf:"bytes,3,opt,name=status"`
+}
+
+// BuildConfigSpec describes when and how builds are created
+type BuildConfigSpec struct {
+
+ // triggers determine how new Builds can be launched from a BuildConfig. If
+ // no triggers are defined, a new build can only occur as a result of an
+ // explicit client build creation.
+ Triggers []BuildTriggerPolicy `json:"triggers" protobuf:"bytes,1,rep,name=triggers"`
+
+ // RunPolicy describes how the new build created from this build
+ // configuration will be scheduled for execution.
+ // This is optional; if not specified, it defaults to "Serial".
+ RunPolicy BuildRunPolicy `json:"runPolicy,omitempty" protobuf:"bytes,2,opt,name=runPolicy,casttype=BuildRunPolicy"`
+
+ // CommonSpec is the desired build specification
+ CommonSpec `json:",inline" protobuf:"bytes,3,opt,name=commonSpec"`
+
+ // successfulBuildsHistoryLimit is the number of old successful builds to retain.
+ // When a BuildConfig is created, the 5 most recent successful builds are retained unless this value is set.
+ // If removed after the BuildConfig has been created, all successful builds are retained.
+ SuccessfulBuildsHistoryLimit *int32 `json:"successfulBuildsHistoryLimit,omitempty" protobuf:"varint,4,opt,name=successfulBuildsHistoryLimit"`
+
+ // failedBuildsHistoryLimit is the number of old failed builds to retain.
+ // When a BuildConfig is created, the 5 most recent failed builds are retained unless this value is set.
+ // If removed after the BuildConfig has been created, all failed builds are retained.
+ FailedBuildsHistoryLimit *int32 `json:"failedBuildsHistoryLimit,omitempty" protobuf:"varint,5,opt,name=failedBuildsHistoryLimit"`
+}
+
+// BuildRunPolicy defines the behaviour of how the new builds are executed
+// from the existing build configuration.
+type BuildRunPolicy string
+
+const (
+ // BuildRunPolicyParallel schedules new builds immediately after they are
+ // created. Builds will be executed in parallel.
+ BuildRunPolicyParallel BuildRunPolicy = "Parallel"
+
+ // BuildRunPolicySerial schedules new builds to execute in a sequence as
+ // they are created. Every build gets queued up and will execute when the
+ // previous build completes. This is the default policy.
+ BuildRunPolicySerial BuildRunPolicy = "Serial"
+
+ // BuildRunPolicySerialLatestOnly schedules only the latest build to execute,
+ // cancelling all the previously queued builds.
+ BuildRunPolicySerialLatestOnly BuildRunPolicy = "SerialLatestOnly"
+)
+
+// BuildConfigStatus contains current state of the build config object.
+type BuildConfigStatus struct {
+ // lastVersion is used to inform about the number of the last triggered build.
+ LastVersion int64 `json:"lastVersion" protobuf:"varint,1,opt,name=lastVersion"`
+}
+
+// SecretLocalReference contains information that points to the local secret being used
+type SecretLocalReference struct {
+ // Name is the name of the resource in the same namespace being referenced
+ Name string `json:"name" protobuf:"bytes,1,opt,name=name"`
+}
+
+// WebHookTrigger is a trigger that gets invoked using a webhook type of post
+type WebHookTrigger struct {
+ // secret used to validate requests.
+ // Deprecated: use SecretReference instead.
+ Secret string `json:"secret,omitempty" protobuf:"bytes,1,opt,name=secret"`
+
+ // allowEnv determines whether the webhook can set environment variables; can only
+ // be set to true for GenericWebHook.
+ AllowEnv bool `json:"allowEnv,omitempty" protobuf:"varint,2,opt,name=allowEnv"`
+
+ // secretReference is a reference to a secret in the same namespace,
+ // containing the value to be validated when the webhook is invoked.
+ // The secret being referenced must contain a key named "WebHookSecretKey", the value
+ // of which will be checked against the value supplied in the webhook invocation.
+ SecretReference *SecretLocalReference `json:"secretReference,omitempty" protobuf:"bytes,3,opt,name=secretReference"`
+}
+
+// ImageChangeTrigger allows builds to be triggered when an ImageStream changes
+type ImageChangeTrigger struct {
+ // lastTriggeredImageID is used internally by the ImageChangeController to save last
+ // used image ID for build
+ LastTriggeredImageID string `json:"lastTriggeredImageID,omitempty" protobuf:"bytes,1,opt,name=lastTriggeredImageID"`
+
+ // from is a reference to an ImageStreamTag that will trigger a build when updated.
+ // It is optional. If no From is specified, the From image from the build strategy
+ // will be used. Only one ImageChangeTrigger with an empty From reference is allowed in
+ // a build configuration.
+ From *corev1.ObjectReference `json:"from,omitempty" protobuf:"bytes,2,opt,name=from"`
+
+ // paused is true if this trigger is temporarily disabled. Optional.
+ Paused bool `json:"paused,omitempty" protobuf:"varint,3,opt,name=paused"`
+}
+
+// BuildTriggerPolicy describes a policy for a single trigger that results in a new Build.
+type BuildTriggerPolicy struct {
+ // type is the type of build trigger
+ Type BuildTriggerType `json:"type" protobuf:"bytes,1,opt,name=type,casttype=BuildTriggerType"`
+
+ // github contains the parameters for a GitHub webhook type of trigger
+ GitHubWebHook *WebHookTrigger `json:"github,omitempty" protobuf:"bytes,2,opt,name=github"`
+
+ // generic contains the parameters for a Generic webhook type of trigger
+ GenericWebHook *WebHookTrigger `json:"generic,omitempty" protobuf:"bytes,3,opt,name=generic"`
+
+ // imageChange contains parameters for an ImageChange type of trigger
+ ImageChange *ImageChangeTrigger `json:"imageChange,omitempty" protobuf:"bytes,4,opt,name=imageChange"`
+
+ // GitLabWebHook contains the parameters for a GitLab webhook type of trigger
+ GitLabWebHook *WebHookTrigger `json:"gitlab,omitempty" protobuf:"bytes,5,opt,name=gitlab"`
+
+ // BitbucketWebHook contains the parameters for a Bitbucket webhook type of
+ // trigger
+ BitbucketWebHook *WebHookTrigger `json:"bitbucket,omitempty" protobuf:"bytes,6,opt,name=bitbucket"`
+}
+
+// BuildTriggerType refers to a specific BuildTriggerPolicy implementation.
+type BuildTriggerType string
+
+const (
+ // GitHubWebHookBuildTriggerType represents a trigger that launches builds on
+ // GitHub webhook invocations
+ GitHubWebHookBuildTriggerType BuildTriggerType = "GitHub"
+ GitHubWebHookBuildTriggerTypeDeprecated BuildTriggerType = "github"
+
+ // GenericWebHookBuildTriggerType represents a trigger that launches builds on
+ // generic webhook invocations
+ GenericWebHookBuildTriggerType BuildTriggerType = "Generic"
+ GenericWebHookBuildTriggerTypeDeprecated BuildTriggerType = "generic"
+
+ // GitLabWebHookBuildTriggerType represents a trigger that launches builds on
+ // GitLab webhook invocations
+ GitLabWebHookBuildTriggerType BuildTriggerType = "GitLab"
+
+ // BitbucketWebHookBuildTriggerType represents a trigger that launches builds on
+ // Bitbucket webhook invocations
+ BitbucketWebHookBuildTriggerType BuildTriggerType = "Bitbucket"
+
+ // ImageChangeBuildTriggerType represents a trigger that launches builds on
+ // availability of a new version of an image
+ ImageChangeBuildTriggerType BuildTriggerType = "ImageChange"
+ ImageChangeBuildTriggerTypeDeprecated BuildTriggerType = "imageChange"
+
+ // ConfigChangeBuildTriggerType will trigger a build on an initial build config creation
+ // WARNING: In the future the behavior will change to trigger a build on any config change
+ ConfigChangeBuildTriggerType BuildTriggerType = "ConfigChange"
+)
+
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+
+// BuildList is a collection of Builds.
+type BuildList struct {
+ metav1.TypeMeta `json:",inline"`
+ metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"`
+
+ // items is a list of builds
+ Items []Build `json:"items" protobuf:"bytes,2,rep,name=items"`
+}
+
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+
+// BuildConfigList is a collection of BuildConfigs.
+type BuildConfigList struct {
+ metav1.TypeMeta `json:",inline"`
+ metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"`
+
+ // items is a list of build configs
+ Items []BuildConfig `json:"items" protobuf:"bytes,2,rep,name=items"`
+}
+
+// GenericWebHookEvent is the payload expected for a generic webhook post
+type GenericWebHookEvent struct {
+ // type is the type of source repository
+ // +k8s:conversion-gen=false
+ Type BuildSourceType `json:"type,omitempty" protobuf:"bytes,1,opt,name=type,casttype=BuildSourceType"`
+
+ // git is the git information if the Type is BuildSourceGit
+ Git *GitInfo `json:"git,omitempty" protobuf:"bytes,2,opt,name=git"`
+
+ // env contains additional environment variables you want to pass into a builder container.
+ // ValueFrom is not supported.
+ Env []corev1.EnvVar `json:"env,omitempty" protobuf:"bytes,3,rep,name=env"`
+
+ // DockerStrategyOptions contains additional docker-strategy specific options for the build
+ DockerStrategyOptions *DockerStrategyOptions `json:"dockerStrategyOptions,omitempty" protobuf:"bytes,4,opt,name=dockerStrategyOptions"`
+}
+
+// GitInfo is the aggregated git information for a generic webhook post
+type GitInfo struct {
+ GitBuildSource `json:",inline" protobuf:"bytes,1,opt,name=gitBuildSource"`
+ GitSourceRevision `json:",inline" protobuf:"bytes,2,opt,name=gitSourceRevision"`
+
+ // Refs is a list of GitRefs for the provided repo - generally sent
+ // when used from a post-receive hook. This field is optional and is
+ // used when sending multiple refs.
+ Refs []GitRefInfo `json:"refs" protobuf:"bytes,3,rep,name=refs"`
+}
+
+// GitRefInfo is a single ref
+type GitRefInfo struct {
+ GitBuildSource `json:",inline" protobuf:"bytes,1,opt,name=gitBuildSource"`
+ GitSourceRevision `json:",inline" protobuf:"bytes,2,opt,name=gitSourceRevision"`
+}
+
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+
+// BuildLog is the (unused) resource associated with the build log redirector
+type BuildLog struct {
+ metav1.TypeMeta `json:",inline"`
+}
+
+// DockerStrategyOptions contains extra strategy options for container image builds
+type DockerStrategyOptions struct {
+ // Args contains any build arguments that are to be passed to Docker. See
+ // https://docs.docker.com/engine/reference/builder/#/arg for more details.
+ BuildArgs []corev1.EnvVar `json:"buildArgs,omitempty" protobuf:"bytes,1,rep,name=buildArgs"`
+
+ // noCache overrides the docker-strategy noCache option in the build config
+ NoCache *bool `json:"noCache,omitempty" protobuf:"varint,2,opt,name=noCache"`
+}
+
+// SourceStrategyOptions contains extra strategy options for Source builds
+type SourceStrategyOptions struct {
+ // incremental overrides the source-strategy incremental option in the build config
+ Incremental *bool `json:"incremental,omitempty" protobuf:"varint,1,opt,name=incremental"`
+}
+
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+
+// BuildRequest is the resource used to pass parameters to the build generator
+type BuildRequest struct {
+ metav1.TypeMeta `json:",inline"`
+ metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"`
+
+ // revision is the information from the source for a specific repo snapshot.
+ Revision *SourceRevision `json:"revision,omitempty" protobuf:"bytes,2,opt,name=revision"`
+
+ // triggeredByImage is the Image that triggered this build.
+ TriggeredByImage *corev1.ObjectReference `json:"triggeredByImage,omitempty" protobuf:"bytes,3,opt,name=triggeredByImage"`
+
+ // from is the reference to the ImageStreamTag that triggered the build.
+ From *corev1.ObjectReference `json:"from,omitempty" protobuf:"bytes,4,opt,name=from"`
+
+ // binary indicates a request to build from a binary provided to the builder
+ Binary *BinaryBuildSource `json:"binary,omitempty" protobuf:"bytes,5,opt,name=binary"`
+
+ // lastVersion (optional) is the LastVersion of the BuildConfig that was used
+ // to generate the build. If the BuildConfig in the generator doesn't match, a build will
+ // not be generated.
+ LastVersion *int64 `json:"lastVersion,omitempty" protobuf:"varint,6,opt,name=lastVersion"`
+
+ // env contains additional environment variables you want to pass into a builder container.
+ Env []corev1.EnvVar `json:"env,omitempty" protobuf:"bytes,7,rep,name=env"`
+
+ // triggeredBy describes which triggers started the most recent update to the
+ // build configuration and contains information about those triggers.
+ TriggeredBy []BuildTriggerCause `json:"triggeredBy" protobuf:"bytes,8,rep,name=triggeredBy"`
+
+ // DockerStrategyOptions contains additional docker-strategy specific options for the build
+ DockerStrategyOptions *DockerStrategyOptions `json:"dockerStrategyOptions,omitempty" protobuf:"bytes,9,opt,name=dockerStrategyOptions"`
+
+ // SourceStrategyOptions contains additional source-strategy specific options for the build
+ SourceStrategyOptions *SourceStrategyOptions `json:"sourceStrategyOptions,omitempty" protobuf:"bytes,10,opt,name=sourceStrategyOptions"`
+}
+
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+
+// BinaryBuildRequestOptions are the options required to fully specify a binary build request
+type BinaryBuildRequestOptions struct {
+ metav1.TypeMeta `json:",inline"`
+ metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"`
+
+ // asFile determines if the binary should be created as a file within the source rather than extracted as an archive
+ AsFile string `json:"asFile,omitempty" protobuf:"bytes,2,opt,name=asFile"`
+
+ // TODO: Improve map[string][]string conversion so we can handle nested objects
+
+ // revision.commit is the value identifying a specific commit
+ Commit string `json:"revision.commit,omitempty" protobuf:"bytes,3,opt,name=revisionCommit"`
+
+ // revision.message is the description of a specific commit
+ Message string `json:"revision.message,omitempty" protobuf:"bytes,4,opt,name=revisionMessage"`
+
+ // revision.authorName of the source control user
+ AuthorName string `json:"revision.authorName,omitempty" protobuf:"bytes,5,opt,name=revisionAuthorName"`
+
+ // revision.authorEmail of the source control user
+ AuthorEmail string `json:"revision.authorEmail,omitempty" protobuf:"bytes,6,opt,name=revisionAuthorEmail"`
+
+ // revision.committerName of the source control user
+ CommitterName string `json:"revision.committerName,omitempty" protobuf:"bytes,7,opt,name=revisionCommitterName"`
+
+ // revision.committerEmail of the source control user
+ CommitterEmail string `json:"revision.committerEmail,omitempty" protobuf:"bytes,8,opt,name=revisionCommitterEmail"`
+}
+
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+
+// BuildLogOptions is the REST options for a build log
+type BuildLogOptions struct {
+ metav1.TypeMeta `json:",inline"`
+
+ // container for which to stream logs. Defaults to only container if there is one container in the pod.
+ Container string `json:"container,omitempty" protobuf:"bytes,1,opt,name=container"`
+ // follow if true indicates that the build log should be streamed until
+ // the build terminates.
+ Follow bool `json:"follow,omitempty" protobuf:"varint,2,opt,name=follow"`
+ // previous returns previous build logs. Defaults to false.
+ Previous bool `json:"previous,omitempty" protobuf:"varint,3,opt,name=previous"`
+ // sinceSeconds is a relative time in seconds before the current time from which to show logs. If this value
+ // precedes the time a pod was started, only logs since the pod start will be returned.
+ // If this value is in the future, no logs will be returned.
+ // Only one of sinceSeconds or sinceTime may be specified.
+ SinceSeconds *int64 `json:"sinceSeconds,omitempty" protobuf:"varint,4,opt,name=sinceSeconds"`
+ // sinceTime is an RFC3339 timestamp from which to show logs. If this value
+ // precedes the time a pod was started, only logs since the pod start will be returned.
+ // If this value is in the future, no logs will be returned.
+ // Only one of sinceSeconds or sinceTime may be specified.
+ SinceTime *metav1.Time `json:"sinceTime,omitempty" protobuf:"bytes,5,opt,name=sinceTime"`
+ // timestamps, if true, add an RFC3339 or RFC3339Nano timestamp at the beginning of every line
+ // of log output. Defaults to false.
+ Timestamps bool `json:"timestamps,omitempty" protobuf:"varint,6,opt,name=timestamps"`
+ // tailLines, if set, is the number of lines from the end of the logs to show. If not specified,
+ // logs are shown from the creation of the container or sinceSeconds or sinceTime
+ TailLines *int64 `json:"tailLines,omitempty" protobuf:"varint,7,opt,name=tailLines"`
+ // limitBytes, if set, is the number of bytes to read from the server before terminating the
+ // log output. This may not display a complete final line of logging, and may return
+ // slightly more or slightly less than the specified limit.
+ LimitBytes *int64 `json:"limitBytes,omitempty" protobuf:"varint,8,opt,name=limitBytes"`
+
+ // noWait if true causes the call to return immediately even if the build
+ // is not available yet. Otherwise the server will wait until the build has started.
+ // TODO: Fix the tag to 'noWait' in v2
+ NoWait bool `json:"nowait,omitempty" protobuf:"varint,9,opt,name=nowait"`
+
+ // version of the build for which to view logs.
+ Version *int64 `json:"version,omitempty" protobuf:"varint,10,opt,name=version"`
+}
+
+// SecretSpec specifies a secret to be included in a build pod and its corresponding mount point
+type SecretSpec struct {
+ // secretSource is a reference to the secret
+ SecretSource corev1.LocalObjectReference `json:"secretSource" protobuf:"bytes,1,opt,name=secretSource"`
+
+ // mountPath is the path at which to mount the secret
+ MountPath string `json:"mountPath" protobuf:"bytes,2,opt,name=mountPath"`
+}
diff --git a/vendor/github.com/openshift/api/build/v1/zz_generated.deepcopy.go b/vendor/github.com/openshift/api/build/v1/zz_generated.deepcopy.go
new file mode 100644
index 0000000000000000000000000000000000000000..d7e48fae269e2e0cebf90b68cf1e9f8a4bd29ab7
--- /dev/null
+++ b/vendor/github.com/openshift/api/build/v1/zz_generated.deepcopy.go
@@ -0,0 +1,1480 @@
+// +build !ignore_autogenerated
+
+// Code generated by deepcopy-gen. DO NOT EDIT.
+
+package v1
+
+import (
+ corev1 "k8s.io/api/core/v1"
+ runtime "k8s.io/apimachinery/pkg/runtime"
+)
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *BinaryBuildRequestOptions) DeepCopyInto(out *BinaryBuildRequestOptions) {
+ *out = *in
+ out.TypeMeta = in.TypeMeta
+ in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BinaryBuildRequestOptions.
+func (in *BinaryBuildRequestOptions) DeepCopy() *BinaryBuildRequestOptions {
+ if in == nil {
+ return nil
+ }
+ out := new(BinaryBuildRequestOptions)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
+func (in *BinaryBuildRequestOptions) DeepCopyObject() runtime.Object {
+ if c := in.DeepCopy(); c != nil {
+ return c
+ }
+ return nil
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *BinaryBuildSource) DeepCopyInto(out *BinaryBuildSource) {
+ *out = *in
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BinaryBuildSource.
+func (in *BinaryBuildSource) DeepCopy() *BinaryBuildSource { + if in == nil { + return nil + } + out := new(BinaryBuildSource) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *BitbucketWebHookCause) DeepCopyInto(out *BitbucketWebHookCause) { + *out = *in + in.CommonWebHookCause.DeepCopyInto(&out.CommonWebHookCause) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BitbucketWebHookCause. +func (in *BitbucketWebHookCause) DeepCopy() *BitbucketWebHookCause { + if in == nil { + return nil + } + out := new(BitbucketWebHookCause) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *Build) DeepCopyInto(out *Build) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Build. +func (in *Build) DeepCopy() *Build { + if in == nil { + return nil + } + out := new(Build) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *Build) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *BuildCondition) DeepCopyInto(out *BuildCondition) { + *out = *in + in.LastUpdateTime.DeepCopyInto(&out.LastUpdateTime) + in.LastTransitionTime.DeepCopyInto(&out.LastTransitionTime) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BuildCondition. +func (in *BuildCondition) DeepCopy() *BuildCondition { + if in == nil { + return nil + } + out := new(BuildCondition) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *BuildConfig) DeepCopyInto(out *BuildConfig) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + out.Status = in.Status + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BuildConfig. +func (in *BuildConfig) DeepCopy() *BuildConfig { + if in == nil { + return nil + } + out := new(BuildConfig) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *BuildConfig) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *BuildConfigList) DeepCopyInto(out *BuildConfigList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]BuildConfig, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BuildConfigList. 
+func (in *BuildConfigList) DeepCopy() *BuildConfigList { + if in == nil { + return nil + } + out := new(BuildConfigList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *BuildConfigList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *BuildConfigSpec) DeepCopyInto(out *BuildConfigSpec) { + *out = *in + if in.Triggers != nil { + in, out := &in.Triggers, &out.Triggers + *out = make([]BuildTriggerPolicy, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + in.CommonSpec.DeepCopyInto(&out.CommonSpec) + if in.SuccessfulBuildsHistoryLimit != nil { + in, out := &in.SuccessfulBuildsHistoryLimit, &out.SuccessfulBuildsHistoryLimit + *out = new(int32) + **out = **in + } + if in.FailedBuildsHistoryLimit != nil { + in, out := &in.FailedBuildsHistoryLimit, &out.FailedBuildsHistoryLimit + *out = new(int32) + **out = **in + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BuildConfigSpec. +func (in *BuildConfigSpec) DeepCopy() *BuildConfigSpec { + if in == nil { + return nil + } + out := new(BuildConfigSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *BuildConfigStatus) DeepCopyInto(out *BuildConfigStatus) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BuildConfigStatus. +func (in *BuildConfigStatus) DeepCopy() *BuildConfigStatus { + if in == nil { + return nil + } + out := new(BuildConfigStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *BuildList) DeepCopyInto(out *BuildList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]Build, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BuildList. +func (in *BuildList) DeepCopy() *BuildList { + if in == nil { + return nil + } + out := new(BuildList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *BuildList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *BuildLog) DeepCopyInto(out *BuildLog) { + *out = *in + out.TypeMeta = in.TypeMeta + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BuildLog. +func (in *BuildLog) DeepCopy() *BuildLog { + if in == nil { + return nil + } + out := new(BuildLog) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. 
+func (in *BuildLog) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *BuildLogOptions) DeepCopyInto(out *BuildLogOptions) { + *out = *in + out.TypeMeta = in.TypeMeta + if in.SinceSeconds != nil { + in, out := &in.SinceSeconds, &out.SinceSeconds + *out = new(int64) + **out = **in + } + if in.SinceTime != nil { + in, out := &in.SinceTime, &out.SinceTime + *out = (*in).DeepCopy() + } + if in.TailLines != nil { + in, out := &in.TailLines, &out.TailLines + *out = new(int64) + **out = **in + } + if in.LimitBytes != nil { + in, out := &in.LimitBytes, &out.LimitBytes + *out = new(int64) + **out = **in + } + if in.Version != nil { + in, out := &in.Version, &out.Version + *out = new(int64) + **out = **in + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BuildLogOptions. +func (in *BuildLogOptions) DeepCopy() *BuildLogOptions { + if in == nil { + return nil + } + out := new(BuildLogOptions) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *BuildLogOptions) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *BuildOutput) DeepCopyInto(out *BuildOutput) { + *out = *in + if in.To != nil { + in, out := &in.To, &out.To + *out = new(corev1.ObjectReference) + **out = **in + } + if in.PushSecret != nil { + in, out := &in.PushSecret, &out.PushSecret + *out = new(corev1.LocalObjectReference) + **out = **in + } + if in.ImageLabels != nil { + in, out := &in.ImageLabels, &out.ImageLabels + *out = make([]ImageLabel, len(*in)) + copy(*out, *in) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BuildOutput. +func (in *BuildOutput) DeepCopy() *BuildOutput { + if in == nil { + return nil + } + out := new(BuildOutput) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *BuildPostCommitSpec) DeepCopyInto(out *BuildPostCommitSpec) { + *out = *in + if in.Command != nil { + in, out := &in.Command, &out.Command + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.Args != nil { + in, out := &in.Args, &out.Args + *out = make([]string, len(*in)) + copy(*out, *in) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BuildPostCommitSpec. +func (in *BuildPostCommitSpec) DeepCopy() *BuildPostCommitSpec { + if in == nil { + return nil + } + out := new(BuildPostCommitSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *BuildRequest) DeepCopyInto(out *BuildRequest) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + if in.Revision != nil { + in, out := &in.Revision, &out.Revision + *out = new(SourceRevision) + (*in).DeepCopyInto(*out) + } + if in.TriggeredByImage != nil { + in, out := &in.TriggeredByImage, &out.TriggeredByImage + *out = new(corev1.ObjectReference) + **out = **in + } + if in.From != nil { + in, out := &in.From, &out.From + *out = new(corev1.ObjectReference) + **out = **in + } + if in.Binary != nil { + in, out := &in.Binary, &out.Binary + *out = new(BinaryBuildSource) + **out = **in + } + if in.LastVersion != nil { + in, out := &in.LastVersion, &out.LastVersion + *out = new(int64) + **out = **in + } + if in.Env != nil { + in, out := &in.Env, &out.Env + *out = make([]corev1.EnvVar, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.TriggeredBy != nil { + in, out := &in.TriggeredBy, &out.TriggeredBy + *out = make([]BuildTriggerCause, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.DockerStrategyOptions != nil { + in, out := &in.DockerStrategyOptions, &out.DockerStrategyOptions + *out = new(DockerStrategyOptions) + (*in).DeepCopyInto(*out) + } + if in.SourceStrategyOptions != nil { + in, out := &in.SourceStrategyOptions, &out.SourceStrategyOptions + *out = new(SourceStrategyOptions) + (*in).DeepCopyInto(*out) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BuildRequest. +func (in *BuildRequest) DeepCopy() *BuildRequest { + if in == nil { + return nil + } + out := new(BuildRequest) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *BuildRequest) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *BuildSource) DeepCopyInto(out *BuildSource) { + *out = *in + if in.Binary != nil { + in, out := &in.Binary, &out.Binary + *out = new(BinaryBuildSource) + **out = **in + } + if in.Dockerfile != nil { + in, out := &in.Dockerfile, &out.Dockerfile + *out = new(string) + **out = **in + } + if in.Git != nil { + in, out := &in.Git, &out.Git + *out = new(GitBuildSource) + (*in).DeepCopyInto(*out) + } + if in.Images != nil { + in, out := &in.Images, &out.Images + *out = make([]ImageSource, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.SourceSecret != nil { + in, out := &in.SourceSecret, &out.SourceSecret + *out = new(corev1.LocalObjectReference) + **out = **in + } + if in.Secrets != nil { + in, out := &in.Secrets, &out.Secrets + *out = make([]SecretBuildSource, len(*in)) + copy(*out, *in) + } + if in.ConfigMaps != nil { + in, out := &in.ConfigMaps, &out.ConfigMaps + *out = make([]ConfigMapBuildSource, len(*in)) + copy(*out, *in) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BuildSource. +func (in *BuildSource) DeepCopy() *BuildSource { + if in == nil { + return nil + } + out := new(BuildSource) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *BuildSpec) DeepCopyInto(out *BuildSpec) { + *out = *in + in.CommonSpec.DeepCopyInto(&out.CommonSpec) + if in.TriggeredBy != nil { + in, out := &in.TriggeredBy, &out.TriggeredBy + *out = make([]BuildTriggerCause, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BuildSpec. +func (in *BuildSpec) DeepCopy() *BuildSpec { + if in == nil { + return nil + } + out := new(BuildSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *BuildStatus) DeepCopyInto(out *BuildStatus) { + *out = *in + if in.StartTimestamp != nil { + in, out := &in.StartTimestamp, &out.StartTimestamp + *out = (*in).DeepCopy() + } + if in.CompletionTimestamp != nil { + in, out := &in.CompletionTimestamp, &out.CompletionTimestamp + *out = (*in).DeepCopy() + } + if in.Config != nil { + in, out := &in.Config, &out.Config + *out = new(corev1.ObjectReference) + **out = **in + } + in.Output.DeepCopyInto(&out.Output) + if in.Stages != nil { + in, out := &in.Stages, &out.Stages + *out = make([]StageInfo, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Conditions != nil { + in, out := &in.Conditions, &out.Conditions + *out = make([]BuildCondition, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BuildStatus. +func (in *BuildStatus) DeepCopy() *BuildStatus { + if in == nil { + return nil + } + out := new(BuildStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *BuildStatusOutput) DeepCopyInto(out *BuildStatusOutput) { + *out = *in + if in.To != nil { + in, out := &in.To, &out.To + *out = new(BuildStatusOutputTo) + **out = **in + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BuildStatusOutput. +func (in *BuildStatusOutput) DeepCopy() *BuildStatusOutput { + if in == nil { + return nil + } + out := new(BuildStatusOutput) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *BuildStatusOutputTo) DeepCopyInto(out *BuildStatusOutputTo) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BuildStatusOutputTo. +func (in *BuildStatusOutputTo) DeepCopy() *BuildStatusOutputTo { + if in == nil { + return nil + } + out := new(BuildStatusOutputTo) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *BuildStrategy) DeepCopyInto(out *BuildStrategy) { + *out = *in + if in.DockerStrategy != nil { + in, out := &in.DockerStrategy, &out.DockerStrategy + *out = new(DockerBuildStrategy) + (*in).DeepCopyInto(*out) + } + if in.SourceStrategy != nil { + in, out := &in.SourceStrategy, &out.SourceStrategy + *out = new(SourceBuildStrategy) + (*in).DeepCopyInto(*out) + } + if in.CustomStrategy != nil { + in, out := &in.CustomStrategy, &out.CustomStrategy + *out = new(CustomBuildStrategy) + (*in).DeepCopyInto(*out) + } + if in.JenkinsPipelineStrategy != nil { + in, out := &in.JenkinsPipelineStrategy, &out.JenkinsPipelineStrategy + *out = new(JenkinsPipelineBuildStrategy) + (*in).DeepCopyInto(*out) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BuildStrategy. +func (in *BuildStrategy) DeepCopy() *BuildStrategy { + if in == nil { + return nil + } + out := new(BuildStrategy) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *BuildTriggerCause) DeepCopyInto(out *BuildTriggerCause) { + *out = *in + if in.GenericWebHook != nil { + in, out := &in.GenericWebHook, &out.GenericWebHook + *out = new(GenericWebHookCause) + (*in).DeepCopyInto(*out) + } + if in.GitHubWebHook != nil { + in, out := &in.GitHubWebHook, &out.GitHubWebHook + *out = new(GitHubWebHookCause) + (*in).DeepCopyInto(*out) + } + if in.ImageChangeBuild != nil { + in, out := &in.ImageChangeBuild, &out.ImageChangeBuild + *out = new(ImageChangeCause) + (*in).DeepCopyInto(*out) + } + if in.GitLabWebHook != nil { + in, out := &in.GitLabWebHook, &out.GitLabWebHook + *out = new(GitLabWebHookCause) + (*in).DeepCopyInto(*out) + } + if in.BitbucketWebHook != nil { + in, out := &in.BitbucketWebHook, &out.BitbucketWebHook + *out = new(BitbucketWebHookCause) + (*in).DeepCopyInto(*out) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BuildTriggerCause. +func (in *BuildTriggerCause) DeepCopy() *BuildTriggerCause { + if in == nil { + return nil + } + out := new(BuildTriggerCause) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *BuildTriggerPolicy) DeepCopyInto(out *BuildTriggerPolicy) { + *out = *in + if in.GitHubWebHook != nil { + in, out := &in.GitHubWebHook, &out.GitHubWebHook + *out = new(WebHookTrigger) + (*in).DeepCopyInto(*out) + } + if in.GenericWebHook != nil { + in, out := &in.GenericWebHook, &out.GenericWebHook + *out = new(WebHookTrigger) + (*in).DeepCopyInto(*out) + } + if in.ImageChange != nil { + in, out := &in.ImageChange, &out.ImageChange + *out = new(ImageChangeTrigger) + (*in).DeepCopyInto(*out) + } + if in.GitLabWebHook != nil { + in, out := &in.GitLabWebHook, &out.GitLabWebHook + *out = new(WebHookTrigger) + (*in).DeepCopyInto(*out) + } + if in.BitbucketWebHook != nil { + in, out := &in.BitbucketWebHook, &out.BitbucketWebHook + *out = new(WebHookTrigger) + (*in).DeepCopyInto(*out) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BuildTriggerPolicy. +func (in *BuildTriggerPolicy) DeepCopy() *BuildTriggerPolicy { + if in == nil { + return nil + } + out := new(BuildTriggerPolicy) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. 
in must be non-nil. +func (in *CommonSpec) DeepCopyInto(out *CommonSpec) { + *out = *in + in.Source.DeepCopyInto(&out.Source) + if in.Revision != nil { + in, out := &in.Revision, &out.Revision + *out = new(SourceRevision) + (*in).DeepCopyInto(*out) + } + in.Strategy.DeepCopyInto(&out.Strategy) + in.Output.DeepCopyInto(&out.Output) + in.Resources.DeepCopyInto(&out.Resources) + in.PostCommit.DeepCopyInto(&out.PostCommit) + if in.CompletionDeadlineSeconds != nil { + in, out := &in.CompletionDeadlineSeconds, &out.CompletionDeadlineSeconds + *out = new(int64) + **out = **in + } + if in.NodeSelector != nil { + in, out := &in.NodeSelector, &out.NodeSelector + *out = make(OptionalNodeSelector, len(*in)) + for key, val := range *in { + (*out)[key] = val + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CommonSpec. +func (in *CommonSpec) DeepCopy() *CommonSpec { + if in == nil { + return nil + } + out := new(CommonSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *CommonWebHookCause) DeepCopyInto(out *CommonWebHookCause) { + *out = *in + if in.Revision != nil { + in, out := &in.Revision, &out.Revision + *out = new(SourceRevision) + (*in).DeepCopyInto(*out) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CommonWebHookCause. +func (in *CommonWebHookCause) DeepCopy() *CommonWebHookCause { + if in == nil { + return nil + } + out := new(CommonWebHookCause) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ConfigMapBuildSource) DeepCopyInto(out *ConfigMapBuildSource) { + *out = *in + out.ConfigMap = in.ConfigMap + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ConfigMapBuildSource. +func (in *ConfigMapBuildSource) DeepCopy() *ConfigMapBuildSource { + if in == nil { + return nil + } + out := new(ConfigMapBuildSource) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *CustomBuildStrategy) DeepCopyInto(out *CustomBuildStrategy) { + *out = *in + out.From = in.From + if in.PullSecret != nil { + in, out := &in.PullSecret, &out.PullSecret + *out = new(corev1.LocalObjectReference) + **out = **in + } + if in.Env != nil { + in, out := &in.Env, &out.Env + *out = make([]corev1.EnvVar, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Secrets != nil { + in, out := &in.Secrets, &out.Secrets + *out = make([]SecretSpec, len(*in)) + copy(*out, *in) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CustomBuildStrategy. +func (in *CustomBuildStrategy) DeepCopy() *CustomBuildStrategy { + if in == nil { + return nil + } + out := new(CustomBuildStrategy) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *DockerBuildStrategy) DeepCopyInto(out *DockerBuildStrategy) { + *out = *in + if in.From != nil { + in, out := &in.From, &out.From + *out = new(corev1.ObjectReference) + **out = **in + } + if in.PullSecret != nil { + in, out := &in.PullSecret, &out.PullSecret + *out = new(corev1.LocalObjectReference) + **out = **in + } + if in.Env != nil { + in, out := &in.Env, &out.Env + *out = make([]corev1.EnvVar, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.BuildArgs != nil { + in, out := &in.BuildArgs, &out.BuildArgs + *out = make([]corev1.EnvVar, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.ImageOptimizationPolicy != nil { + in, out := &in.ImageOptimizationPolicy, &out.ImageOptimizationPolicy + *out = new(ImageOptimizationPolicy) + **out = **in + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DockerBuildStrategy. +func (in *DockerBuildStrategy) DeepCopy() *DockerBuildStrategy { + if in == nil { + return nil + } + out := new(DockerBuildStrategy) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DockerStrategyOptions) DeepCopyInto(out *DockerStrategyOptions) { + *out = *in + if in.BuildArgs != nil { + in, out := &in.BuildArgs, &out.BuildArgs + *out = make([]corev1.EnvVar, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.NoCache != nil { + in, out := &in.NoCache, &out.NoCache + *out = new(bool) + **out = **in + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DockerStrategyOptions. +func (in *DockerStrategyOptions) DeepCopy() *DockerStrategyOptions { + if in == nil { + return nil + } + out := new(DockerStrategyOptions) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *GenericWebHookCause) DeepCopyInto(out *GenericWebHookCause) { + *out = *in + if in.Revision != nil { + in, out := &in.Revision, &out.Revision + *out = new(SourceRevision) + (*in).DeepCopyInto(*out) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new GenericWebHookCause. +func (in *GenericWebHookCause) DeepCopy() *GenericWebHookCause { + if in == nil { + return nil + } + out := new(GenericWebHookCause) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *GenericWebHookEvent) DeepCopyInto(out *GenericWebHookEvent) { + *out = *in + if in.Git != nil { + in, out := &in.Git, &out.Git + *out = new(GitInfo) + (*in).DeepCopyInto(*out) + } + if in.Env != nil { + in, out := &in.Env, &out.Env + *out = make([]corev1.EnvVar, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.DockerStrategyOptions != nil { + in, out := &in.DockerStrategyOptions, &out.DockerStrategyOptions + *out = new(DockerStrategyOptions) + (*in).DeepCopyInto(*out) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new GenericWebHookEvent. 
+func (in *GenericWebHookEvent) DeepCopy() *GenericWebHookEvent { + if in == nil { + return nil + } + out := new(GenericWebHookEvent) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *GitBuildSource) DeepCopyInto(out *GitBuildSource) { + *out = *in + in.ProxyConfig.DeepCopyInto(&out.ProxyConfig) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new GitBuildSource. +func (in *GitBuildSource) DeepCopy() *GitBuildSource { + if in == nil { + return nil + } + out := new(GitBuildSource) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *GitHubWebHookCause) DeepCopyInto(out *GitHubWebHookCause) { + *out = *in + if in.Revision != nil { + in, out := &in.Revision, &out.Revision + *out = new(SourceRevision) + (*in).DeepCopyInto(*out) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new GitHubWebHookCause. +func (in *GitHubWebHookCause) DeepCopy() *GitHubWebHookCause { + if in == nil { + return nil + } + out := new(GitHubWebHookCause) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *GitInfo) DeepCopyInto(out *GitInfo) { + *out = *in + in.GitBuildSource.DeepCopyInto(&out.GitBuildSource) + out.GitSourceRevision = in.GitSourceRevision + if in.Refs != nil { + in, out := &in.Refs, &out.Refs + *out = make([]GitRefInfo, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new GitInfo. +func (in *GitInfo) DeepCopy() *GitInfo { + if in == nil { + return nil + } + out := new(GitInfo) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *GitLabWebHookCause) DeepCopyInto(out *GitLabWebHookCause) { + *out = *in + in.CommonWebHookCause.DeepCopyInto(&out.CommonWebHookCause) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new GitLabWebHookCause. +func (in *GitLabWebHookCause) DeepCopy() *GitLabWebHookCause { + if in == nil { + return nil + } + out := new(GitLabWebHookCause) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *GitRefInfo) DeepCopyInto(out *GitRefInfo) { + *out = *in + in.GitBuildSource.DeepCopyInto(&out.GitBuildSource) + out.GitSourceRevision = in.GitSourceRevision + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new GitRefInfo. +func (in *GitRefInfo) DeepCopy() *GitRefInfo { + if in == nil { + return nil + } + out := new(GitRefInfo) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *GitSourceRevision) DeepCopyInto(out *GitSourceRevision) { + *out = *in + out.Author = in.Author + out.Committer = in.Committer + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new GitSourceRevision. 
+func (in *GitSourceRevision) DeepCopy() *GitSourceRevision { + if in == nil { + return nil + } + out := new(GitSourceRevision) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ImageChangeCause) DeepCopyInto(out *ImageChangeCause) { + *out = *in + if in.FromRef != nil { + in, out := &in.FromRef, &out.FromRef + *out = new(corev1.ObjectReference) + **out = **in + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ImageChangeCause. +func (in *ImageChangeCause) DeepCopy() *ImageChangeCause { + if in == nil { + return nil + } + out := new(ImageChangeCause) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ImageChangeTrigger) DeepCopyInto(out *ImageChangeTrigger) { + *out = *in + if in.From != nil { + in, out := &in.From, &out.From + *out = new(corev1.ObjectReference) + **out = **in + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ImageChangeTrigger. +func (in *ImageChangeTrigger) DeepCopy() *ImageChangeTrigger { + if in == nil { + return nil + } + out := new(ImageChangeTrigger) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ImageLabel) DeepCopyInto(out *ImageLabel) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ImageLabel. +func (in *ImageLabel) DeepCopy() *ImageLabel { + if in == nil { + return nil + } + out := new(ImageLabel) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ImageSource) DeepCopyInto(out *ImageSource) { + *out = *in + out.From = in.From + if in.As != nil { + in, out := &in.As, &out.As + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.Paths != nil { + in, out := &in.Paths, &out.Paths + *out = make([]ImageSourcePath, len(*in)) + copy(*out, *in) + } + if in.PullSecret != nil { + in, out := &in.PullSecret, &out.PullSecret + *out = new(corev1.LocalObjectReference) + **out = **in + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ImageSource. +func (in *ImageSource) DeepCopy() *ImageSource { + if in == nil { + return nil + } + out := new(ImageSource) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ImageSourcePath) DeepCopyInto(out *ImageSourcePath) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ImageSourcePath. +func (in *ImageSourcePath) DeepCopy() *ImageSourcePath { + if in == nil { + return nil + } + out := new(ImageSourcePath) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *JenkinsPipelineBuildStrategy) DeepCopyInto(out *JenkinsPipelineBuildStrategy) { + *out = *in + if in.Env != nil { + in, out := &in.Env, &out.Env + *out = make([]corev1.EnvVar, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new JenkinsPipelineBuildStrategy. +func (in *JenkinsPipelineBuildStrategy) DeepCopy() *JenkinsPipelineBuildStrategy { + if in == nil { + return nil + } + out := new(JenkinsPipelineBuildStrategy) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in OptionalNodeSelector) DeepCopyInto(out *OptionalNodeSelector) { + { + in := &in + *out = make(OptionalNodeSelector, len(*in)) + for key, val := range *in { + (*out)[key] = val + } + return + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OptionalNodeSelector. +func (in OptionalNodeSelector) DeepCopy() OptionalNodeSelector { + if in == nil { + return nil + } + out := new(OptionalNodeSelector) + in.DeepCopyInto(out) + return *out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ProxyConfig) DeepCopyInto(out *ProxyConfig) { + *out = *in + if in.HTTPProxy != nil { + in, out := &in.HTTPProxy, &out.HTTPProxy + *out = new(string) + **out = **in + } + if in.HTTPSProxy != nil { + in, out := &in.HTTPSProxy, &out.HTTPSProxy + *out = new(string) + **out = **in + } + if in.NoProxy != nil { + in, out := &in.NoProxy, &out.NoProxy + *out = new(string) + **out = **in + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ProxyConfig. +func (in *ProxyConfig) DeepCopy() *ProxyConfig { + if in == nil { + return nil + } + out := new(ProxyConfig) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SecretBuildSource) DeepCopyInto(out *SecretBuildSource) { + *out = *in + out.Secret = in.Secret + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SecretBuildSource. +func (in *SecretBuildSource) DeepCopy() *SecretBuildSource { + if in == nil { + return nil + } + out := new(SecretBuildSource) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SecretLocalReference) DeepCopyInto(out *SecretLocalReference) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SecretLocalReference. +func (in *SecretLocalReference) DeepCopy() *SecretLocalReference { + if in == nil { + return nil + } + out := new(SecretLocalReference) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SecretSpec) DeepCopyInto(out *SecretSpec) { + *out = *in + out.SecretSource = in.SecretSource + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SecretSpec. 
+func (in *SecretSpec) DeepCopy() *SecretSpec { + if in == nil { + return nil + } + out := new(SecretSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SourceBuildStrategy) DeepCopyInto(out *SourceBuildStrategy) { + *out = *in + out.From = in.From + if in.PullSecret != nil { + in, out := &in.PullSecret, &out.PullSecret + *out = new(corev1.LocalObjectReference) + **out = **in + } + if in.Env != nil { + in, out := &in.Env, &out.Env + *out = make([]corev1.EnvVar, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Incremental != nil { + in, out := &in.Incremental, &out.Incremental + *out = new(bool) + **out = **in + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SourceBuildStrategy. +func (in *SourceBuildStrategy) DeepCopy() *SourceBuildStrategy { + if in == nil { + return nil + } + out := new(SourceBuildStrategy) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SourceControlUser) DeepCopyInto(out *SourceControlUser) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SourceControlUser. +func (in *SourceControlUser) DeepCopy() *SourceControlUser { + if in == nil { + return nil + } + out := new(SourceControlUser) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SourceRevision) DeepCopyInto(out *SourceRevision) { + *out = *in + if in.Git != nil { + in, out := &in.Git, &out.Git + *out = new(GitSourceRevision) + **out = **in + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SourceRevision. +func (in *SourceRevision) DeepCopy() *SourceRevision { + if in == nil { + return nil + } + out := new(SourceRevision) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SourceStrategyOptions) DeepCopyInto(out *SourceStrategyOptions) { + *out = *in + if in.Incremental != nil { + in, out := &in.Incremental, &out.Incremental + *out = new(bool) + **out = **in + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SourceStrategyOptions. +func (in *SourceStrategyOptions) DeepCopy() *SourceStrategyOptions { + if in == nil { + return nil + } + out := new(SourceStrategyOptions) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *StageInfo) DeepCopyInto(out *StageInfo) { + *out = *in + in.StartTime.DeepCopyInto(&out.StartTime) + if in.Steps != nil { + in, out := &in.Steps, &out.Steps + *out = make([]StepInfo, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new StageInfo. +func (in *StageInfo) DeepCopy() *StageInfo { + if in == nil { + return nil + } + out := new(StageInfo) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *StepInfo) DeepCopyInto(out *StepInfo) { + *out = *in + in.StartTime.DeepCopyInto(&out.StartTime) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new StepInfo. +func (in *StepInfo) DeepCopy() *StepInfo { + if in == nil { + return nil + } + out := new(StepInfo) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *WebHookTrigger) DeepCopyInto(out *WebHookTrigger) { + *out = *in + if in.SecretReference != nil { + in, out := &in.SecretReference, &out.SecretReference + *out = new(SecretLocalReference) + **out = **in + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WebHookTrigger. +func (in *WebHookTrigger) DeepCopy() *WebHookTrigger { + if in == nil { + return nil + } + out := new(WebHookTrigger) + in.DeepCopyInto(out) + return out +}
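For context on how these generated methods are consumed, here is a minimal usage sketch (illustrative only, not part of the vendored diff; it assumes the buildv1 alias for github.com/openshift/api/build/v1): DeepCopy returns an independent clone whose pointer and slice fields are themselves cloned, while DeepCopyObject satisfies runtime.Object so generic client and controller code can copy values without knowing the concrete type.

package main

import (
	"fmt"

	buildv1 "github.com/openshift/api/build/v1"
	"k8s.io/apimachinery/pkg/runtime"
)

// cloneObject copies any API object through the generated DeepCopyObject
// method, without referring to the concrete type.
func cloneObject(obj runtime.Object) runtime.Object {
	return obj.DeepCopyObject()
}

func main() {
	orig := &buildv1.BuildConfig{}
	orig.Spec.SuccessfulBuildsHistoryLimit = new(int32)
	*orig.Spec.SuccessfulBuildsHistoryLimit = 5

	clone := orig.DeepCopy()
	*clone.Spec.SuccessfulBuildsHistoryLimit = 1 // the pointer field was cloned, not shared

	fmt.Println(*orig.Spec.SuccessfulBuildsHistoryLimit, *clone.Spec.SuccessfulBuildsHistoryLimit) // 5 1
	_ = cloneObject(orig)
}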
or '.'.", +} + +func (BinaryBuildSource) SwaggerDoc() map[string]string { + return map_BinaryBuildSource +} + +var map_BitbucketWebHookCause = map[string]string{ + "": "BitbucketWebHookCause has information about a Bitbucket webhook that triggered a build.", +} + +func (BitbucketWebHookCause) SwaggerDoc() map[string]string { + return map_BitbucketWebHookCause +} + +var map_Build = map[string]string{ + "": "Build encapsulates the inputs needed to produce a new deployable image, as well as the status of the execution and a reference to the Pod which executed the build.", + "spec": "spec is all the inputs used to execute the build.", + "status": "status is the current status of the build.", +} + +func (Build) SwaggerDoc() map[string]string { + return map_Build +} + +var map_BuildCondition = map[string]string{ + "": "BuildCondition describes the state of a build at a certain point.", + "type": "Type of build condition.", + "status": "Status of the condition, one of True, False, Unknown.", + "lastUpdateTime": "The last time this condition was updated.", + "lastTransitionTime": "The last time the condition transitioned from one status to another.", + "reason": "The reason for the condition's last transition.", + "message": "A human readable message indicating details about the transition.", +} + +func (BuildCondition) SwaggerDoc() map[string]string { + return map_BuildCondition +} + +var map_BuildConfig = map[string]string{ + "": "Build configurations define a build process for new container images. There are three types of builds possible - a container image build using a Dockerfile, a Source-to-Image build that uses a specially prepared base image that accepts source code that it can make runnable, and a custom build that can run // arbitrary container images as a base and accept the build parameters. Builds run on the cluster and on completion are pushed to the container image registry specified in the \"output\" section. A build can be triggered via a webhook, when the base image changes, or when a user manually requests a new build be // created.\n\nEach build created by a build configuration is numbered and refers back to its parent configuration. Multiple builds can be triggered at once. Builds that do not have \"output\" set can be used to test code or run a verification build.", + "spec": "spec holds all the input necessary to produce a new build, and the conditions when to trigger them.", + "status": "status holds any relevant information about a build config", +} + +func (BuildConfig) SwaggerDoc() map[string]string { + return map_BuildConfig +} + +var map_BuildConfigList = map[string]string{ + "": "BuildConfigList is a collection of BuildConfigs.", + "items": "items is a list of build configs", +} + +func (BuildConfigList) SwaggerDoc() map[string]string { + return map_BuildConfigList +} + +var map_BuildConfigSpec = map[string]string{ + "": "BuildConfigSpec describes when and how builds are created", + "triggers": "triggers determine how new Builds can be launched from a BuildConfig. If no triggers are defined, a new build can only occur as a result of an explicit client build creation.", + "runPolicy": "RunPolicy describes how the new build created from this build configuration will be scheduled for execution. This is optional, if not specified we default to \"Serial\".", + "successfulBuildsHistoryLimit": "successfulBuildsHistoryLimit is the number of old successful builds to retain. 
+var map_BuildConfigSpec = map[string]string{ + "": "BuildConfigSpec describes when and how builds are created", + "triggers": "triggers determine how new Builds can be launched from a BuildConfig. If no triggers are defined, a new build can only occur as a result of an explicit client build creation.", + "runPolicy": "RunPolicy describes how the new build created from this build configuration will be scheduled for execution. This is optional; if not specified we default to \"Serial\".", + "successfulBuildsHistoryLimit": "successfulBuildsHistoryLimit is the number of old successful builds to retain. When a BuildConfig is created, the 5 most recent successful builds are retained unless this value is set. If removed after the BuildConfig has been created, all successful builds are retained.", + "failedBuildsHistoryLimit": "failedBuildsHistoryLimit is the number of old failed builds to retain. When a BuildConfig is created, the 5 most recent failed builds are retained unless this value is set. If removed after the BuildConfig has been created, all failed builds are retained.", +} + +func (BuildConfigSpec) SwaggerDoc() map[string]string { + return map_BuildConfigSpec +} + +var map_BuildConfigStatus = map[string]string{ + "": "BuildConfigStatus contains current state of the build config object.", + "lastVersion": "lastVersion is used to inform about number of last triggered build.", +} + +func (BuildConfigStatus) SwaggerDoc() map[string]string { + return map_BuildConfigStatus +} + +var map_BuildList = map[string]string{ + "": "BuildList is a collection of Builds.", + "items": "items is a list of builds", +} + +func (BuildList) SwaggerDoc() map[string]string { + return map_BuildList +} + +var map_BuildLog = map[string]string{ + "": "BuildLog is the (unused) resource associated with the build log redirector", +} + +func (BuildLog) SwaggerDoc() map[string]string { + return map_BuildLog +} + +var map_BuildLogOptions = map[string]string{ + "": "BuildLogOptions is the REST options for a build log", + "container": "container for which to stream logs. Defaults to the only container if there is one container in the pod.", + "follow": "follow if true indicates that the build log should be streamed until the build terminates.", + "previous": "previous returns previous build logs. Defaults to false.", + "sinceSeconds": "sinceSeconds is a relative time in seconds before the current time from which to show logs. If this value precedes the time a pod was started, only logs since the pod start will be returned. If this value is in the future, no logs will be returned. Only one of sinceSeconds or sinceTime may be specified.", + "sinceTime": "sinceTime is an RFC3339 timestamp from which to show logs. If this value precedes the time a pod was started, only logs since the pod start will be returned. If this value is in the future, no logs will be returned. Only one of sinceSeconds or sinceTime may be specified.", + "timestamps": "timestamps, If true, add an RFC3339 or RFC3339Nano timestamp at the beginning of every line of log output. Defaults to false.", + "tailLines": "tailLines, If set, is the number of lines from the end of the logs to show. If not specified, logs are shown from the creation of the container or sinceSeconds or sinceTime", + "limitBytes": "limitBytes, If set, is the number of bytes to read from the server before terminating the log output. This may not display a complete final line of logging, and may return slightly more or slightly less than the specified limit.", + "nowait": "noWait if true causes the call to return immediately even if the build is not available yet. Otherwise the server will wait until the build has started.", + "version": "version of the build for which to view logs.", +} + +func (BuildLogOptions) SwaggerDoc() map[string]string { + return map_BuildLogOptions +} + +var map_BuildOutput = map[string]string{ + "": "BuildOutput is input to a build strategy and describes the container image that the strategy should produce.", + "to": "to defines an optional location to push the output of this build to. Kind must be one of 'ImageStreamTag' or 'DockerImage'.
This value will be used to look up a container image repository to push to. In the case of an ImageStreamTag, the ImageStreamTag will be looked for in the namespace of the build unless Namespace is specified.", + "pushSecret": "PushSecret is the name of a Secret that would be used for setting up the authentication for executing the Docker push to an authentication-enabled Docker Registry (or Docker Hub).", + "imageLabels": "imageLabels define a list of labels that are applied to the resulting image. If there are multiple labels with the same name then the last one in the list is used.", +} + +func (BuildOutput) SwaggerDoc() map[string]string { + return map_BuildOutput +} + +var map_BuildPostCommitSpec = map[string]string{ + "": "A BuildPostCommitSpec holds a build post commit hook specification. The hook executes a command in a temporary container running the build output image, immediately after the last layer of the image is committed and before the image is pushed to a registry. The command is executed with the current working directory ($PWD) set to the image's WORKDIR.\n\nThe build will be marked as failed if the hook execution fails. It will fail if the script or command returns a non-zero exit code, or if there is any other error related to starting the temporary container.\n\nThere are five different ways to configure the hook. As an example, all forms below are equivalent and will execute `rake test --verbose`.\n\n1. Shell script:\n\n \"postCommit\": {\n \"script\": \"rake test --verbose\",\n }\n\n The above is a convenient form which is equivalent to:\n\n \"postCommit\": {\n \"command\": [\"/bin/sh\", \"-ic\"],\n \"args\": [\"rake test --verbose\"]\n }\n\n2. A command as the image entrypoint:\n\n \"postCommit\": {\n \"command\": [\"rake\", \"test\", \"--verbose\"]\n }\n\n Command overrides the image entrypoint in the exec form, as documented in\n Docker: https://docs.docker.com/engine/reference/builder/#entrypoint.\n\n3. Pass arguments to the default entrypoint:\n\n \"postCommit\": {\n\t\t \"args\": [\"rake\", \"test\", \"--verbose\"]\n\t }\n\n This form is only useful if the image entrypoint can handle arguments.\n\n4. Shell script with arguments:\n\n \"postCommit\": {\n \"script\": \"rake test $1\",\n \"args\": [\"--verbose\"]\n }\n\n This form is useful if you need to pass arguments that would otherwise be\n hard to quote properly in the shell script. In the script, $0 will be\n \"/bin/sh\" and $1, $2, etc, are the positional arguments from Args.\n\n5. Command with arguments:\n\n \"postCommit\": {\n \"command\": [\"rake\", \"test\"],\n \"args\": [\"--verbose\"]\n }\n\n This form is equivalent to appending the arguments to the Command slice.\n\nIt is invalid to provide both Script and Command simultaneously. If none of the fields are specified, the hook is not executed.", + "command": "command is the command to run. It may not be specified with Script. This might be needed if the image doesn't have `/bin/sh`, or if you do not want to use a shell. In all other cases, using Script might be more convenient.", + "args": "args is a list of arguments that are provided to either Command, Script or the container image's default entrypoint. The arguments are placed immediately after the command to be run.", + "script": "script is a shell script to be run with `/bin/sh -ic`. It may not be specified with Command. Use Script when a shell script is appropriate to execute the post build hook, for example for running unit tests with `rake test`. If you need control over the image entrypoint, or if the image does not have `/bin/sh`, use Command and/or Args. The `-i` flag is needed to support CentOS and RHEL images that use Software Collections (SCL), in order to have the appropriate collections enabled in the shell. E.g., in the Ruby image, this is necessary to make `ruby`, `bundle` and other binaries available in the PATH.", +} + +func (BuildPostCommitSpec) SwaggerDoc() map[string]string { + return map_BuildPostCommitSpec +}
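To make the five equivalent hook forms above concrete, here is an illustrative sketch (not part of the diff; field names match the BuildPostCommitSpec struct in this package, and the buildv1 alias is an assumption):

package main

import buildv1 "github.com/openshift/api/build/v1"

// The five equivalent post-commit hook forms described in the documentation above.
var (
	form1 = buildv1.BuildPostCommitSpec{Script: "rake test --verbose"}
	form2 = buildv1.BuildPostCommitSpec{Command: []string{"rake", "test", "--verbose"}}
	form3 = buildv1.BuildPostCommitSpec{Args: []string{"rake", "test", "--verbose"}}
	form4 = buildv1.BuildPostCommitSpec{Script: "rake test $1", Args: []string{"--verbose"}}
	form5 = buildv1.BuildPostCommitSpec{Command: []string{"rake", "test"}, Args: []string{"--verbose"}}
)

func main() { _ = []buildv1.BuildPostCommitSpec{form1, form2, form3, form4, form5} }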
+var map_BuildRequest = map[string]string{ + "": "BuildRequest is the resource used to pass parameters to the build generator", + "revision": "revision is the information from the source for a specific repo snapshot.", + "triggeredByImage": "triggeredByImage is the Image that triggered this build.", + "from": "from is the reference to the ImageStreamTag that triggered the build.", + "binary": "binary indicates a request to build from a binary provided to the builder", + "lastVersion": "lastVersion (optional) is the LastVersion of the BuildConfig that was used to generate the build. If the BuildConfig in the generator doesn't match, a build will not be generated.", + "env": "env contains additional environment variables you want to pass into a builder container.", + "triggeredBy": "triggeredBy describes which triggers started the most recent update to the build configuration and contains information about those triggers.", + "dockerStrategyOptions": "DockerStrategyOptions contains additional docker-strategy specific options for the build", + "sourceStrategyOptions": "SourceStrategyOptions contains additional source-strategy specific options for the build", +} + +func (BuildRequest) SwaggerDoc() map[string]string { + return map_BuildRequest +} + +var map_BuildSource = map[string]string{ + "": "BuildSource is the SCM used for the build.", + "type": "type of build input to accept", + "binary": "binary builds accept a binary as their input. The binary is generally assumed to be a tar, gzipped tar, or zip file depending on the strategy. For container image builds, this is the build context and an optional Dockerfile may be specified to override any Dockerfile in the build context. For Source builds, this is assumed to be an archive as described above. For Source and container image builds, if binary.asFile is set the build will receive a directory with a single file. contextDir may be used when an archive is provided. Custom builds will receive this binary as input on STDIN.", + "dockerfile": "dockerfile is the raw contents of a Dockerfile which should be built. When this option is specified, the FROM may be modified based on your strategy base image and additional ENV stanzas from your strategy environment will be added after the FROM, but before the rest of your Dockerfile stanzas. The Dockerfile source type may be used with other options like git - in those cases the Git repo will have any innate Dockerfile replaced in the context dir.", + "git": "git contains optional information about git build source", + "images": "images describes a set of images to be used to provide source for the build", + "contextDir": "contextDir specifies the sub-directory where the source code for the application exists. This allows buildable sources to live in a directory other than the root of the repository.", + "sourceSecret": "sourceSecret is the name of a Secret that would be used for setting up the authentication for cloning a private repository.
The secret contains valid credentials for the remote repository, where the data's key represents the authentication method to be used and value is the base64 encoded credentials. Supported auth methods are: ssh-privatekey.", + "secrets": "secrets represents a list of secrets and their destinations that will be used only for the build.", + "configMaps": "configMaps represents a list of configMaps and their destinations that will be used for the build.", +} + +func (BuildSource) SwaggerDoc() map[string]string { + return map_BuildSource +} + +var map_BuildSpec = map[string]string{ + "": "BuildSpec has the information to represent a build and also additional information about a build", + "triggeredBy": "triggeredBy describes which triggers started the most recent update to the build configuration and contains information about those triggers.", +} + +func (BuildSpec) SwaggerDoc() map[string]string { + return map_BuildSpec +} + +var map_BuildStatus = map[string]string{ + "": "BuildStatus contains the status of a build", + "phase": "phase is the point in the build lifecycle. Possible values are \"New\", \"Pending\", \"Running\", \"Complete\", \"Failed\", \"Error\", and \"Cancelled\".", + "cancelled": "cancelled describes if a cancel event was triggered for the build.", + "reason": "reason is a brief CamelCase string that describes any failure and is meant for machine parsing and tidy display in the CLI.", + "message": "message is a human-readable message indicating details about why the build has this status.", + "startTimestamp": "startTimestamp is a timestamp representing the server time when this Build started running in a Pod. It is represented in RFC3339 form and is in UTC.", + "completionTimestamp": "completionTimestamp is a timestamp representing the server time when this Build was finished, whether that build failed or succeeded. It reflects the time at which the Pod running the Build terminated. It is represented in RFC3339 form and is in UTC.", + "duration": "duration contains time.Duration object describing build time.", + "outputDockerImageReference": "outputDockerImageReference contains a reference to the container image that will be built by this build. Its value is computed from Build.Spec.Output.To, and should include the registry address, so that it can be used to push and pull the image.", + "config": "config is an ObjectReference to the BuildConfig this Build is based on.", + "output": "output describes the container image the build has produced.", + "stages": "stages contains details about each stage that occurs during the build including start time, duration (in milliseconds), and the steps that occurred within each stage.", + "logSnippet": "logSnippet is the last few lines of the build log. This value is only set for builds that failed.", + "conditions": "Conditions represents the latest available observations of a build's current state.", +} + +func (BuildStatus) SwaggerDoc() map[string]string { + return map_BuildStatus +} + +var map_BuildStatusOutput = map[string]string{ + "": "BuildStatusOutput contains the status of the built image.", + "to": "to describes the status of the built image being pushed to a registry.", +} + +func (BuildStatusOutput) SwaggerDoc() map[string]string { + return map_BuildStatusOutput +} + +var map_BuildStatusOutputTo = map[string]string{ + "": "BuildStatusOutputTo describes the status of the built image with regard to the image registry to which it was supposed to be pushed.", + "imageDigest": "imageDigest is the digest of the built container image.
The digest uniquely identifies the image in the registry to which it was pushed.\n\nPlease note that this field may not always be set even if the push completes successfully - e.g. when the registry returns no digest or returns it in a format that the builder doesn't understand.", +} + +func (BuildStatusOutputTo) SwaggerDoc() map[string]string { + return map_BuildStatusOutputTo +} + +var map_BuildStrategy = map[string]string{ + "": "BuildStrategy contains the details of how to perform a build.", + "type": "type is the kind of build strategy.", + "dockerStrategy": "dockerStrategy holds the parameters to the container image build strategy.", + "sourceStrategy": "sourceStrategy holds the parameters to the Source build strategy.", + "customStrategy": "customStrategy holds the parameters to the Custom build strategy", + "jenkinsPipelineStrategy": "JenkinsPipelineStrategy holds the parameters to the Jenkins Pipeline build strategy. Deprecated: use OpenShift Pipelines", +} + +func (BuildStrategy) SwaggerDoc() map[string]string { + return map_BuildStrategy +} + +var map_BuildTriggerCause = map[string]string{ + "": "BuildTriggerCause holds information about a triggered build. It is used for displaying build trigger data for each build and build configuration in oc describe. It is also used to describe which triggers led to the most recent update in the build configuration.", + "message": "message is used to store a human readable message for why the build was triggered. E.g.: \"Manually triggered by user\", \"Configuration change\", etc.", + "genericWebHook": "genericWebHook holds data about a build's generic webhook trigger.", + "githubWebHook": "gitHubWebHook represents data for a GitHub webhook that fired a specific build.", + "imageChangeBuild": "imageChangeBuild stores information about an imagechange event that triggered a new build.", + "gitlabWebHook": "GitLabWebHook represents data for a GitLab webhook that fired a specific build.", + "bitbucketWebHook": "BitbucketWebHook represents data for a Bitbucket webhook that fired a specific build.", +} + +func (BuildTriggerCause) SwaggerDoc() map[string]string { + return map_BuildTriggerCause +} + +var map_BuildTriggerPolicy = map[string]string{ + "": "BuildTriggerPolicy describes a policy for a single trigger that results in a new Build.", + "type": "type is the type of build trigger", + "github": "github contains the parameters for a GitHub webhook type of trigger", + "generic": "generic contains the parameters for a Generic webhook type of trigger", + "imageChange": "imageChange contains parameters for an ImageChange type of trigger", + "gitlab": "GitLabWebHook contains the parameters for a GitLab webhook type of trigger", + "bitbucket": "BitbucketWebHook contains the parameters for a Bitbucket webhook type of trigger", +} + +func (BuildTriggerPolicy) SwaggerDoc() map[string]string { + return map_BuildTriggerPolicy +}
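For illustration (not part of the diff; the secret name "github-webhook-secret" is hypothetical, and the constant and field names are taken from this package's types as I understand them), a trigger policy wiring a GitHub webhook to a secret reference:

package main

import buildv1 "github.com/openshift/api/build/v1"

// A GitHub webhook trigger that resolves its secret from a local Secret
// reference rather than an inline secret value.
var githubTrigger = buildv1.BuildTriggerPolicy{
	Type: buildv1.GitHubWebHookBuildTriggerType,
	GitHubWebHook: &buildv1.WebHookTrigger{
		SecretReference: &buildv1.SecretLocalReference{Name: "github-webhook-secret"}, // hypothetical name
	},
}

func main() { _ = githubTrigger }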
This is optional.", + "strategy": "strategy defines how to perform a build.", + "output": "output describes the container image the Strategy should produce.", + "resources": "resources computes resource requirements to execute the build.", + "postCommit": "postCommit is a build hook executed after the build output image is committed, before it is pushed to a registry.", + "completionDeadlineSeconds": "completionDeadlineSeconds is an optional duration in seconds, counted from the time when a build pod gets scheduled in the system, that the build may be active on a node before the system actively tries to terminate the build; value must be positive integer", + "nodeSelector": "nodeSelector is a selector which must be true for the build pod to fit on a node If nil, it can be overridden by default build nodeselector values for the cluster. If set to an empty map or a map with any values, default build nodeselector values are ignored.", +} + +func (CommonSpec) SwaggerDoc() map[string]string { + return map_CommonSpec +} + +var map_CommonWebHookCause = map[string]string{ + "": "CommonWebHookCause factors out the identical format of these webhook causes into struct so we can share it in the specific causes; it is too late for GitHub and Generic but we can leverage this pattern with GitLab and Bitbucket.", + "revision": "Revision is the git source revision information of the trigger.", + "secret": "Secret is the obfuscated webhook secret that triggered a build.", +} + +func (CommonWebHookCause) SwaggerDoc() map[string]string { + return map_CommonWebHookCause +} + +var map_ConfigMapBuildSource = map[string]string{ + "": "ConfigMapBuildSource describes a configmap and its destination directory that will be used only at the build time. The content of the configmap referenced here will be copied into the destination directory instead of mounting.", + "configMap": "configMap is a reference to an existing configmap that you want to use in your build.", + "destinationDir": "destinationDir is the directory where the files from the configmap should be available for the build time. For the Source build strategy, these will be injected into a container where the assemble script runs. 
+var map_CommonWebHookCause = map[string]string{ + "": "CommonWebHookCause factors out the identical format of these webhook causes into a struct so we can share it in the specific causes; it is too late for GitHub and Generic but we can leverage this pattern with GitLab and Bitbucket.", + "revision": "Revision is the git source revision information of the trigger.", + "secret": "Secret is the obfuscated webhook secret that triggered a build.", +} + +func (CommonWebHookCause) SwaggerDoc() map[string]string { + return map_CommonWebHookCause +} + +var map_ConfigMapBuildSource = map[string]string{ + "": "ConfigMapBuildSource describes a configmap and its destination directory that will be used only at the build time. The content of the configmap referenced here will be copied into the destination directory instead of mounting.", + "configMap": "configMap is a reference to an existing configmap that you want to use in your build.", + "destinationDir": "destinationDir is the directory where the files from the configmap should be available for the build time. For the Source build strategy, these will be injected into a container where the assemble script runs. For the container image build strategy, these will be copied into the build directory, where the Dockerfile is located, so users can ADD or COPY them during container image build.", +} + +func (ConfigMapBuildSource) SwaggerDoc() map[string]string { + return map_ConfigMapBuildSource +} + +var map_CustomBuildStrategy = map[string]string{ + "": "CustomBuildStrategy defines input parameters specific to Custom build.", + "from": "from is a reference to a DockerImage, ImageStreamTag, or ImageStreamImage from which the container image should be pulled", + "pullSecret": "pullSecret is the name of a Secret that would be used for setting up the authentication for pulling the container images from the private Docker registries", + "env": "env contains additional environment variables you want to pass into a builder container.", + "exposeDockerSocket": "exposeDockerSocket will allow running Docker commands (and build container images) from inside the container.", + "forcePull": "forcePull describes if the controller should configure the build pod to always pull the images for the builder or only pull if it is not present locally", + "secrets": "secrets is a list of additional secrets that will be included in the build pod", + "buildAPIVersion": "buildAPIVersion is the requested API version for the Build object serialized and passed to the custom builder", +} + +func (CustomBuildStrategy) SwaggerDoc() map[string]string { + return map_CustomBuildStrategy +} + +var map_DockerBuildStrategy = map[string]string{ + "": "DockerBuildStrategy defines input parameters specific to container image build.", + "from": "from is a reference to a DockerImage, ImageStreamTag, or ImageStreamImage which overrides the FROM image in the Dockerfile for the build. If the Dockerfile uses multi-stage builds, this will replace the image in the last FROM directive of the file.", + "pullSecret": "pullSecret is the name of a Secret that would be used for setting up the authentication for pulling the container images from the private Docker registries", + "noCache": "noCache if set to true indicates that the container image build must be executed with the --no-cache=true flag", + "env": "env contains additional environment variables you want to pass into a builder container.", + "forcePull": "forcePull describes if the builder should pull the images from registry prior to building.", + "dockerfilePath": "dockerfilePath is the path of the Dockerfile that will be used to build the container image, relative to the root of the context (contextDir).", + "buildArgs": "buildArgs contains build arguments that will be resolved in the Dockerfile. See https://docs.docker.com/engine/reference/builder/#/arg for more details.", + "imageOptimizationPolicy": "imageOptimizationPolicy describes what optimizations the system can use when building images to reduce the final size or time spent building the image. The default policy is 'None' which means the final build image will be equivalent to an image created by the container image build API. The experimental policy 'SkipLayers' will avoid committing new layers in between each image step, and will fail if the Dockerfile cannot provide compatibility with the 'None' policy.
An additional experimental policy 'SkipLayersAndWarn' is the same as 'SkipLayers' but simply warns if compatibility cannot be preserved.", +} + +func (DockerBuildStrategy) SwaggerDoc() map[string]string { + return map_DockerBuildStrategy +} + +var map_DockerStrategyOptions = map[string]string{ + "": "DockerStrategyOptions contains extra strategy options for container image builds", + "buildArgs": "Args contains any build arguments that are to be passed to Docker. See https://docs.docker.com/engine/reference/builder/#/arg for more details", + "noCache": "noCache overrides the docker-strategy noCache option in the build config", +} + +func (DockerStrategyOptions) SwaggerDoc() map[string]string { + return map_DockerStrategyOptions +} + +var map_GenericWebHookCause = map[string]string{ + "": "GenericWebHookCause holds information about a generic WebHook that triggered a build.", + "revision": "revision is an optional field that stores the git source revision information of the generic webhook trigger when it is available.", + "secret": "secret is the obfuscated webhook secret that triggered a build.", +} + +func (GenericWebHookCause) SwaggerDoc() map[string]string { + return map_GenericWebHookCause +} + +var map_GenericWebHookEvent = map[string]string{ + "": "GenericWebHookEvent is the payload expected for a generic webhook post", + "type": "type is the type of source repository", + "git": "git is the git information if the Type is BuildSourceGit", + "env": "env contains additional environment variables you want to pass into a builder container. ValueFrom is not supported.", + "dockerStrategyOptions": "DockerStrategyOptions contains additional docker-strategy specific options for the build", +} + +func (GenericWebHookEvent) SwaggerDoc() map[string]string { + return map_GenericWebHookEvent +} + +var map_GitBuildSource = map[string]string{ + "": "GitBuildSource defines the parameters of a Git SCM", + "uri": "uri points to the source that will be built. The structure of the source will depend on the type of build to run", + "ref": "ref is the branch/tag/ref to build.", +} + +func (GitBuildSource) SwaggerDoc() map[string]string { + return map_GitBuildSource +} + +var map_GitHubWebHookCause = map[string]string{ + "": "GitHubWebHookCause has information about a GitHub webhook that triggered a build.", + "revision": "revision is the git revision information of the trigger.", + "secret": "secret is the obfuscated webhook secret that triggered a build.", +} + +func (GitHubWebHookCause) SwaggerDoc() map[string]string { + return map_GitHubWebHookCause +} + +var map_GitInfo = map[string]string{ + "": "GitInfo is the aggregated git information for a generic webhook post", + "refs": "Refs is a list of GitRefs for the provided repo - generally sent when used from a post-receive hook. 
This field is optional and is used when sending multiple refs", +} + +func (GitInfo) SwaggerDoc() map[string]string { + return map_GitInfo +} + +var map_GitLabWebHookCause = map[string]string{ + "": "GitLabWebHookCause has information about a GitLab webhook that triggered a build.", +} + +func (GitLabWebHookCause) SwaggerDoc() map[string]string { + return map_GitLabWebHookCause +} + +var map_GitRefInfo = map[string]string{ + "": "GitRefInfo is a single ref", +} + +func (GitRefInfo) SwaggerDoc() map[string]string { + return map_GitRefInfo +} + +var map_GitSourceRevision = map[string]string{ + "": "GitSourceRevision is the commit information from a git source for a build", + "commit": "commit is the commit hash identifying a specific commit", + "author": "author is the author of a specific commit", + "committer": "committer is the committer of a specific commit", + "message": "message is the description of a specific commit", +} + +func (GitSourceRevision) SwaggerDoc() map[string]string { + return map_GitSourceRevision +} + +var map_ImageChangeCause = map[string]string{ + "": "ImageChangeCause contains information about the image that triggered a build", + "imageID": "imageID is the ID of the image that triggered a new build.", + "fromRef": "fromRef contains detailed information about an image that triggered a build.", +} + +func (ImageChangeCause) SwaggerDoc() map[string]string { + return map_ImageChangeCause +} + +var map_ImageChangeTrigger = map[string]string{ + "": "ImageChangeTrigger allows builds to be triggered when an ImageStream changes", + "lastTriggeredImageID": "lastTriggeredImageID is used internally by the ImageChangeController to save the last used image ID for a build", + "from": "from is a reference to an ImageStreamTag that will trigger a build when updated. It is optional. If no From is specified, the From image from the build strategy will be used. Only one ImageChangeTrigger with an empty From reference is allowed in a build configuration.", + "paused": "paused is true if this trigger is temporarily disabled. Optional.", +} + +func (ImageChangeTrigger) SwaggerDoc() map[string]string { + return map_ImageChangeTrigger +} + +var map_ImageLabel = map[string]string{ + "": "ImageLabel represents a label applied to the resulting image.", + "name": "name defines the name of the label. It must have non-zero length.", + "value": "value defines the literal value of the label.", +} + +func (ImageLabel) SwaggerDoc() map[string]string { + return map_ImageLabel +} + +var map_ImageSource = map[string]string{ + "": "ImageSource is used to describe build source that will be extracted from an image or used during a multi stage build. A reference of type ImageStreamTag, ImageStreamImage or DockerImage may be used. A pull secret can be specified to pull the image from an external registry or override the default service account secret if pulling from the internal registry. Image sources can either be used to extract content from an image and place it into the build context along with the repository source, or used directly during a multi-stage container image build to allow content to be copied without overwriting the contents of the repository source (see the 'paths' and 'as' fields).", + "from": "from is a reference to an ImageStreamTag, ImageStreamImage, or DockerImage to copy source from.", + "as": "A list of image names that this source will be used in place of during a multi-stage container image build. 
For instance, a Dockerfile that uses \"COPY --from=nginx:latest\" will first check for an image source that has \"nginx:latest\" in this field before attempting to pull directly. If the Dockerfile does not reference an image source it is ignored. This field and paths may both be set, in which case the contents will be used twice.", + "paths": "paths is a list of source and destination paths to copy from the image. This content will be copied into the build context prior to starting the build. If no paths are set, the build context will not be altered.", + "pullSecret": "pullSecret is a reference to a secret to be used to pull the image from a registry. If the image is pulled from the OpenShift registry, this field does not need to be set.", +} + +func (ImageSource) SwaggerDoc() map[string]string { + return map_ImageSource +} + +var map_ImageSourcePath = map[string]string{ + "": "ImageSourcePath describes a path to be copied from a source image and its destination within the build directory.", + "sourcePath": "sourcePath is the absolute path of the file or directory inside the image to copy to the build directory. If the source path ends in /. then the content of the directory will be copied, but the directory itself will not be created at the destination.", + "destinationDir": "destinationDir is the relative directory within the build directory where files copied from the image are placed.", +} + +func (ImageSourcePath) SwaggerDoc() map[string]string { + return map_ImageSourcePath +} + +var map_JenkinsPipelineBuildStrategy = map[string]string{ + "": "JenkinsPipelineBuildStrategy holds parameters specific to a Jenkins Pipeline build. Deprecated: use OpenShift Pipelines", + "jenkinsfilePath": "JenkinsfilePath is the optional path of the Jenkinsfile that will be used to configure the pipeline relative to the root of the context (contextDir). If neither JenkinsfilePath nor Jenkinsfile is specified, this defaults to Jenkinsfile in the root of the specified contextDir.", + "jenkinsfile": "Jenkinsfile defines the optional raw contents of a Jenkinsfile which defines a Jenkins pipeline build.", + "env": "env contains additional environment variables you want to pass into a build pipeline.", +} + +func (JenkinsPipelineBuildStrategy) SwaggerDoc() map[string]string { + return map_JenkinsPipelineBuildStrategy +} + +var map_ProxyConfig = map[string]string{ + "": "ProxyConfig defines what proxies to use for an operation", + "httpProxy": "httpProxy is a proxy used to reach the git repository over http", + "httpsProxy": "httpsProxy is a proxy used to reach the git repository over https", + "noProxy": "noProxy is the list of domains for which the proxy should not be used", +} + +func (ProxyConfig) SwaggerDoc() map[string]string { + return map_ProxyConfig +} + +var map_SecretBuildSource = map[string]string{ + "": "SecretBuildSource describes a secret and its destination directory that will be used only at the build time. The content of the secret referenced here will be copied into the destination directory instead of mounting.", + "secret": "secret is a reference to an existing secret that you want to use in your build.", + "destinationDir": "destinationDir is the directory where the files from the secret should be available for the build time. For the Source build strategy, these will be injected into a container where the assemble script runs. Later, when the script finishes, all files injected will be truncated to zero length. 
For the container image build strategy, these will be copied into the build directory, where the Dockerfile is located, so users can ADD or COPY them during container image build.", +} + +func (SecretBuildSource) SwaggerDoc() map[string]string { + return map_SecretBuildSource +} + +var map_SecretLocalReference = map[string]string{ + "": "SecretLocalReference contains information that points to the local secret being used", + "name": "Name is the name of the resource in the same namespace being referenced", +} + +func (SecretLocalReference) SwaggerDoc() map[string]string { + return map_SecretLocalReference +} + +var map_SecretSpec = map[string]string{ + "": "SecretSpec specifies a secret to be included in a build pod and its corresponding mount point", + "secretSource": "secretSource is a reference to the secret", + "mountPath": "mountPath is the path at which to mount the secret", +} + +func (SecretSpec) SwaggerDoc() map[string]string { + return map_SecretSpec +} + +var map_SourceBuildStrategy = map[string]string{ + "": "SourceBuildStrategy defines input parameters specific to a Source build.", + "from": "from is a reference to a DockerImage, ImageStreamTag, or ImageStreamImage from which the container image should be pulled", + "pullSecret": "pullSecret is the name of a Secret that would be used for setting up the authentication for pulling the container images from the private Docker registries", + "env": "env contains additional environment variables you want to pass into a builder container.", + "scripts": "scripts is the location of Source scripts", + "incremental": "incremental flag forces the Source build to do incremental builds if true.", + "forcePull": "forcePull describes if the builder should pull the images from registry prior to building.", +} + +func (SourceBuildStrategy) SwaggerDoc() map[string]string { + return map_SourceBuildStrategy +} + +var map_SourceControlUser = map[string]string{ + "": "SourceControlUser defines the identity of a user of source control", + "name": "name of the source control user", + "email": "email of the source control user", +} + +func (SourceControlUser) SwaggerDoc() map[string]string { + return map_SourceControlUser +} + +var map_SourceRevision = map[string]string{ + "": "SourceRevision is the revision or commit information from the source for the build", + "type": "type of the build source, may be one of 'Source', 'Dockerfile', 'Binary', or 'Images'", + "git": "Git contains information about git-based build source", +} + +func (SourceRevision) SwaggerDoc() map[string]string { + return map_SourceRevision +} + +var map_SourceStrategyOptions = map[string]string{ + "": "SourceStrategyOptions contains extra strategy options for Source builds", + "incremental": "incremental overrides the source-strategy incremental option in the build config", +} + +func (SourceStrategyOptions) SwaggerDoc() map[string]string { + return map_SourceStrategyOptions +} + +var map_StageInfo = map[string]string{ + "": "StageInfo contains details about a build stage.", + "name": "name is a unique identifier for each build stage that occurs.", + "startTime": "startTime is a timestamp representing the server time when this Stage started. It is represented in RFC3339 form and is in UTC.", + "durationMilliseconds": "durationMilliseconds identifies how long the stage took to complete in milliseconds. 
Note: the duration of a stage can exceed the sum of the duration of the steps within the stage as not all actions are accounted for in explicit build steps.", + "steps": "steps contains details about each step that occurs during a build stage including start time and duration in milliseconds.", +} + +func (StageInfo) SwaggerDoc() map[string]string { + return map_StageInfo +} + +var map_StepInfo = map[string]string{ + "": "StepInfo contains details about a build step.", + "name": "name is a unique identifier for each build step.", + "startTime": "startTime is a timestamp representing the server time when this Step started. It is represented in RFC3339 form and is in UTC.", + "durationMilliseconds": "durationMilliseconds identifies how long the step took to complete in milliseconds.", +} + +func (StepInfo) SwaggerDoc() map[string]string { + return map_StepInfo +} + +var map_WebHookTrigger = map[string]string{ + "": "WebHookTrigger is a trigger that gets invoked using a webhook type of post", + "secret": "secret used to validate requests. Deprecated: use SecretReference instead.", + "allowEnv": "allowEnv determines whether the webhook can set environment variables; can only be set to true for GenericWebHook.", + "secretReference": "secretReference is a reference to a secret in the same namespace, containing the value to be validated when the webhook is invoked. The secret being referenced must contain a key named \"WebHookSecretKey\", the value of which will be checked against the value supplied in the webhook invocation.", +} + +func (WebHookTrigger) SwaggerDoc() map[string]string { + return map_WebHookTrigger +} + +// AUTO-GENERATED FUNCTIONS END HERE diff --git a/vendor/github.com/openshift/api/config/install.go b/vendor/github.com/openshift/api/config/install.go new file mode 100644 index 0000000000000000000000000000000000000000..9acfa4203bce9a6f588841abb943493b9bdc78fc --- /dev/null +++ b/vendor/github.com/openshift/api/config/install.go @@ -0,0 +1,26 @@ +package config + +import ( + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" + + configv1 "github.com/openshift/api/config/v1" +) + +const ( + GroupName = "config.openshift.io" +) + +var ( + schemeBuilder = runtime.NewSchemeBuilder(configv1.Install) + // Install is a function which adds every version of this group to a scheme + Install = schemeBuilder.AddToScheme +) + +func Resource(resource string) schema.GroupResource { + return schema.GroupResource{Group: GroupName, Resource: resource} +} + +func Kind(kind string) schema.GroupKind { + return schema.GroupKind{Group: GroupName, Kind: kind} +} diff --git a/vendor/github.com/openshift/api/config/v1/0000_03_config-operator_01_proxy.crd.yaml b/vendor/github.com/openshift/api/config/v1/0000_03_config-operator_01_proxy.crd.yaml index afd0767479e2a3b86af82e7cbf1c147ae59a3ca7..cf04b249b9831c1911e3f4da32304852bbf63571 100644 --- a/vendor/github.com/openshift/api/config/v1/0000_03_config-operator_01_proxy.crd.yaml +++ b/vendor/github.com/openshift/api/config/v1/0000_03_config-operator_01_proxy.crd.yaml @@ -62,17 +62,19 @@ spec: type: string trustedCA: description: "trustedCA is a reference to a ConfigMap containing a CA - certificate bundle used for client egress HTTPS connections. The certificate - bundle must be from the CA that signed the proxy's certificate and - be signed for everything. The trustedCA field should only be consumed - by a proxy validator. 
The validator is responsible for reading the - certificate bundle from required key \"ca-bundle.crt\" and copying - it to a ConfigMap named \"trusted-ca-bundle\" in the \"openshift-config-managed\" - namespace. The namespace for the ConfigMap referenced by trustedCA - is \"openshift-config\". Here is an example ConfigMap (in yaml): \n - apiVersion: v1 kind: ConfigMap metadata: name: user-ca-bundle namespace: - openshift-config data: ca-bundle.crt: | -----BEGIN CERTIFICATE----- - \ Custom CA certificate bundle. -----END CERTIFICATE-----" + certificate bundle. The trustedCA field should only be consumed by + a proxy validator. The validator is responsible for reading the certificate + bundle from the required key \"ca-bundle.crt\", merging it with the + system default trust bundle, and writing the merged trust bundle to + a ConfigMap named \"trusted-ca-bundle\" in the \"openshift-config-managed\" + namespace. Clients that expect to make proxy connections must use + the trusted-ca-bundle for all HTTPS requests to the proxy, and may + use the trusted-ca-bundle for non-proxy HTTPS requests as well. \n + The namespace for the ConfigMap referenced by trustedCA is \"openshift-config\". + Here is an example ConfigMap (in yaml): \n apiVersion: v1 kind: ConfigMap + metadata: name: user-ca-bundle namespace: openshift-config data: + \ ca-bundle.crt: | -----BEGIN CERTIFICATE----- Custom + CA certificate bundle. -----END CERTIFICATE-----" type: object required: - name diff --git a/vendor/github.com/openshift/api/config/v1/0000_10_config-operator_01_apiserver.crd.yaml b/vendor/github.com/openshift/api/config/v1/0000_10_config-operator_01_apiserver.crd.yaml index 4e1fdac3704d557fc8e6e00d41326d47e1b3164f..ebabc90095106ce8582512c48c9a9c9f97e2539c 100644 --- a/vendor/github.com/openshift/api/config/v1/0000_10_config-operator_01_apiserver.crd.yaml +++ b/vendor/github.com/openshift/api/config/v1/0000_10_config-operator_01_apiserver.crd.yaml @@ -165,6 +165,11 @@ spec: \n minTLSVersion: TLSv1.1 \n NOTE: currently the highest minTLSVersion allowed is VersionTLS12" type: string + enum: + - VersionTLS10 + - VersionTLS11 + - VersionTLS12 + - VersionTLS13 nullable: true intermediate: description: "intermediate is a TLS security profile based on: \n @@ -215,5 +220,10 @@ spec: profile is currently not supported because it is not yet well adopted by common software libraries." type: string + enum: + - Old + - Intermediate + - Modern + - Custom status: type: object diff --git a/vendor/github.com/openshift/api/config/v1/0000_10_config-operator_01_authentication.crd.yaml b/vendor/github.com/openshift/api/config/v1/0000_10_config-operator_01_authentication.crd.yaml index bb95918d9dad2d2373a5e1048d46a9e897280440..0fbf020bd81a167a91e5e8566f6bd94a156a46c6 100644 --- a/vendor/github.com/openshift/api/config/v1/0000_10_config-operator_01_authentication.crd.yaml +++ b/vendor/github.com/openshift/api/config/v1/0000_10_config-operator_01_authentication.crd.yaml @@ -64,7 +64,7 @@ spec: type: string serviceAccountIssuer: description: serviceAccountIssuer is the identifier of the bound service - account token issuer. The default is auth.openshift.io. + account token issuer. 
The default is https://kubernetes.default.svc type: string type: description: type identifies the cluster managed, user facing authentication diff --git a/vendor/github.com/openshift/api/config/v1/0000_10_config-operator_01_build.crd.yaml b/vendor/github.com/openshift/api/config/v1/0000_10_config-operator_01_build.crd.yaml index 8f75839712304d9ba8147c1e85768751cb89c508..f3957ce34a42b1678a504cf5ecf819cc2da21173 100644 --- a/vendor/github.com/openshift/api/config/v1/0000_10_config-operator_01_build.crd.yaml +++ b/vendor/github.com/openshift/api/config/v1/0000_10_config-operator_01_build.crd.yaml @@ -89,19 +89,21 @@ spec: type: string trustedCA: description: "trustedCA is a reference to a ConfigMap containing - a CA certificate bundle used for client egress HTTPS connections. - The certificate bundle must be from the CA that signed the - proxy's certificate and be signed for everything. The trustedCA - field should only be consumed by a proxy validator. The validator - is responsible for reading the certificate bundle from required - key \"ca-bundle.crt\" and copying it to a ConfigMap named - \"trusted-ca-bundle\" in the \"openshift-config-managed\" - namespace. The namespace for the ConfigMap referenced by trustedCA - is \"openshift-config\". Here is an example ConfigMap (in - yaml): \n apiVersion: v1 kind: ConfigMap metadata: name: - user-ca-bundle namespace: openshift-config data: ca-bundle.crt: - | -----BEGIN CERTIFICATE----- Custom CA certificate - bundle. -----END CERTIFICATE-----" + a CA certificate bundle. The trustedCA field should only be + consumed by a proxy validator. The validator is responsible + for reading the certificate bundle from the required key \"ca-bundle.crt\", + merging it with the system default trust bundle, and writing + the merged trust bundle to a ConfigMap named \"trusted-ca-bundle\" + in the \"openshift-config-managed\" namespace. Clients that + expect to make proxy connections must use the trusted-ca-bundle + for all HTTPS requests to the proxy, and may use the trusted-ca-bundle + for non-proxy HTTPS requests as well. \n The namespace for + the ConfigMap referenced by trustedCA is \"openshift-config\". + Here is an example ConfigMap (in yaml): \n apiVersion: v1 + kind: ConfigMap metadata: name: user-ca-bundle namespace: + openshift-config data: ca-bundle.crt: | -----BEGIN + CERTIFICATE----- Custom CA certificate bundle. -----END + CERTIFICATE-----" type: object required: - name @@ -242,19 +244,21 @@ spec: type: string trustedCA: description: "trustedCA is a reference to a ConfigMap containing - a CA certificate bundle used for client egress HTTPS connections. - The certificate bundle must be from the CA that signed the - proxy's certificate and be signed for everything. The trustedCA - field should only be consumed by a proxy validator. The validator - is responsible for reading the certificate bundle from required - key \"ca-bundle.crt\" and copying it to a ConfigMap named - \"trusted-ca-bundle\" in the \"openshift-config-managed\" - namespace. The namespace for the ConfigMap referenced by trustedCA - is \"openshift-config\". Here is an example ConfigMap (in - yaml): \n apiVersion: v1 kind: ConfigMap metadata: name: - user-ca-bundle namespace: openshift-config data: ca-bundle.crt: - | -----BEGIN CERTIFICATE----- Custom CA certificate - bundle. -----END CERTIFICATE-----" + a CA certificate bundle. The trustedCA field should only be + consumed by a proxy validator. 
The validator is responsible + for reading the certificate bundle from the required key \"ca-bundle.crt\", + merging it with the system default trust bundle, and writing + the merged trust bundle to a ConfigMap named \"trusted-ca-bundle\" + in the \"openshift-config-managed\" namespace. Clients that + expect to make proxy connections must use the trusted-ca-bundle + for all HTTPS requests to the proxy, and may use the trusted-ca-bundle + for non-proxy HTTPS requests as well. \n The namespace for + the ConfigMap referenced by trustedCA is \"openshift-config\". + Here is an example ConfigMap (in yaml): \n apiVersion: v1 + kind: ConfigMap metadata: name: user-ca-bundle namespace: + openshift-config data: ca-bundle.crt: | -----BEGIN + CERTIFICATE----- Custom CA certificate bundle. -----END + CERTIFICATE-----" type: object required: - name diff --git a/vendor/github.com/openshift/api/config/v1/types_authentication.go b/vendor/github.com/openshift/api/config/v1/types_authentication.go index df2cdf2b9aeec3b5f45f11455795e7fe07648f32..4f87bd5219d7112a50449d5583579479a15ddc04 100644 --- a/vendor/github.com/openshift/api/config/v1/types_authentication.go +++ b/vendor/github.com/openshift/api/config/v1/types_authentication.go @@ -53,7 +53,7 @@ type AuthenticationSpec struct { // serviceAccountIssuer is the identifier of the bound service account token // issuer. - // The default is auth.openshift.io. + // The default is https://kubernetes.default.svc // +optional ServiceAccountIssuer string `json:"serviceAccountIssuer"` } diff --git a/vendor/github.com/openshift/api/config/v1/types_feature.go b/vendor/github.com/openshift/api/config/v1/types_feature.go index 9a6cd4ee04621914ebcb0a73f50387189aec92b0..ce90126272901237a66b0855db0a569e1fcdc07b 100644 --- a/vendor/github.com/openshift/api/config/v1/types_feature.go +++ b/vendor/github.com/openshift/api/config/v1/types_feature.go @@ -117,7 +117,6 @@ var defaultFeatures = &FeatureGateEnabledDisabled{ "NodeDisruptionExclusion", // sig-scheduling, ccoleman "ServiceNodeExclusion", // sig-scheduling, ccoleman "SCTPSupport", // sig-network, ccallend - "IPv6DualStack", // sig-network, ccoleman }, Disabled: []string{ "LegacyNodeRoleBehavior", // sig-scheduling, ccoleman diff --git a/vendor/github.com/openshift/api/config/v1/types_operatorhub.go b/vendor/github.com/openshift/api/config/v1/types_operatorhub.go index 1d998bf37d9a464af6f208667ed9cbdae72eca33..1b2b7f82e9e99a3fd5bfd98f4e55cadfff00d6c2 100644 --- a/vendor/github.com/openshift/api/config/v1/types_operatorhub.go +++ b/vendor/github.com/openshift/api/config/v1/types_operatorhub.go @@ -37,6 +37,7 @@ type OperatorHubStatus struct { // the state of the default hub sources for OperatorHub on the cluster from // enabled to disabled and vice versa. // +kubebuilder:subresource:status +// +genclient // +genclient:nonNamespaced type OperatorHub struct { metav1.TypeMeta `json:",inline"` diff --git a/vendor/github.com/openshift/api/config/v1/types_proxy.go b/vendor/github.com/openshift/api/config/v1/types_proxy.go index 93f4c487e49c84475d900879d82288ce6d5017ce..211e501e08cbe481555a7cf6a427014b7fa984d1 100644 --- a/vendor/github.com/openshift/api/config/v1/types_proxy.go +++ b/vendor/github.com/openshift/api/config/v1/types_proxy.go @@ -41,13 +41,17 @@ type ProxySpec struct { // +optional ReadinessEndpoints []string `json:"readinessEndpoints,omitempty"` - // trustedCA is a reference to a ConfigMap containing a CA certificate bundle used - // for client egress HTTPS connections. 
The certificate bundle must be from the CA - // that signed the proxy's certificate and be signed for everything. The trustedCA - // field should only be consumed by a proxy validator. The validator is responsible - // for reading the certificate bundle from required key "ca-bundle.crt" and copying - // it to a ConfigMap named "trusted-ca-bundle" in the "openshift-config-managed" - // namespace. The namespace for the ConfigMap referenced by trustedCA is + // trustedCA is a reference to a ConfigMap containing a CA certificate bundle. + // The trustedCA field should only be consumed by a proxy validator. The + // validator is responsible for reading the certificate bundle from the required + // key "ca-bundle.crt", merging it with the system default trust bundle, + // and writing the merged trust bundle to a ConfigMap named "trusted-ca-bundle" + // in the "openshift-config-managed" namespace. Clients that expect to make + // proxy connections must use the trusted-ca-bundle for all HTTPS requests to + // the proxy, and may use the trusted-ca-bundle for non-proxy HTTPS requests as + // well. + // + // The namespace for the ConfigMap referenced by trustedCA is // "openshift-config". Here is an example ConfigMap (in yaml): // // apiVersion: v1 diff --git a/vendor/github.com/openshift/api/config/v1/types_tlssecurityprofile.go b/vendor/github.com/openshift/api/config/v1/types_tlssecurityprofile.go index ea788dc162df6a2eb8ac0253bd0c10894006a671..9dbacb99668148956466a38cf275ee039561f148 100644 --- a/vendor/github.com/openshift/api/config/v1/types_tlssecurityprofile.go +++ b/vendor/github.com/openshift/api/config/v1/types_tlssecurityprofile.go @@ -136,6 +136,7 @@ type CustomTLSProfile struct { } // TLSProfileType defines a TLS security profile type. +// +kubebuilder:validation:Enum=Old;Intermediate;Modern;Custom type TLSProfileType string const ( @@ -180,6 +181,7 @@ type TLSProfileSpec struct { // // Note that SSLv3.0 is not a supported protocol version due to well known // vulnerabilities such as POODLE: https://en.wikipedia.org/wiki/POODLE +// +kubebuilder:validation:Enum=VersionTLS10;VersionTLS11;VersionTLS12;VersionTLS13 type TLSProtocolVersion string const ( diff --git a/vendor/github.com/openshift/api/config/v1/zz_generated.swagger_doc_generated.go b/vendor/github.com/openshift/api/config/v1/zz_generated.swagger_doc_generated.go index 81b5c10fb7a089c38997ff8a69061dc96a0dcb92..1128c6bebb14e352db02a20835f91e61b969ea5a 100644 --- a/vendor/github.com/openshift/api/config/v1/zz_generated.swagger_doc_generated.go +++ b/vendor/github.com/openshift/api/config/v1/zz_generated.swagger_doc_generated.go @@ -303,7 +303,7 @@ var map_AuthenticationSpec = map[string]string{ "type": "type identifies the cluster managed, user facing authentication mode in use. Specifically, it manages the component that responds to login attempts. The default is IntegratedOAuth.", "oauthMetadata": "oauthMetadata contains the discovery endpoint data for OAuth 2.0 Authorization Server Metadata for an external OAuth server. This discovery document can be viewed from its served location: oc get --raw '/.well-known/oauth-authorization-server' For further details, see the IETF Draft: https://tools.ietf.org/html/draft-ietf-oauth-discovery-04#section-2 If oauthMetadata.name is non-empty, this value has precedence over any metadata reference stored in status. The key \"oauthMetadata\" is used to locate the data. If specified and the config map or expected key is not found, no metadata is served. 
If the specified metadata is not valid, no metadata is served. The namespace for this config map is openshift-config.", "webhookTokenAuthenticators": "webhookTokenAuthenticators configures remote token reviewers. These remote authentication webhooks can be used to verify bearer tokens via the tokenreviews.authentication.k8s.io REST API. This is required to honor bearer tokens that are provisioned by an external authentication service. The namespace for these secrets is openshift-config.", - "serviceAccountIssuer": "serviceAccountIssuer is the identifier of the bound service account token issuer. The default is auth.openshift.io.", + "serviceAccountIssuer": "serviceAccountIssuer is the identifier of the bound service account token issuer. The default is https://kubernetes.default.svc", } func (AuthenticationSpec) SwaggerDoc() map[string]string { @@ -1220,7 +1220,7 @@ var map_ProxySpec = map[string]string{ "httpsProxy": "httpsProxy is the URL of the proxy for HTTPS requests. Empty means unset and will not result in an env var.", "noProxy": "noProxy is a comma-separated list of hostnames and/or CIDRs for which the proxy should not be used. Empty means unset and will not result in an env var.", "readinessEndpoints": "readinessEndpoints is a list of endpoints used to verify readiness of the proxy.", - "trustedCA": "trustedCA is a reference to a ConfigMap containing a CA certificate bundle used for client egress HTTPS connections. The certificate bundle must be from the CA that signed the proxy's certificate and be signed for everything. The trustedCA field should only be consumed by a proxy validator. The validator is responsible for reading the certificate bundle from required key \"ca-bundle.crt\" and copying it to a ConfigMap named \"trusted-ca-bundle\" in the \"openshift-config-managed\" namespace. The namespace for the ConfigMap referenced by trustedCA is \"openshift-config\". Here is an example ConfigMap (in yaml):\n\napiVersion: v1 kind: ConfigMap metadata:\n name: user-ca-bundle\n namespace: openshift-config\n data:\n ca-bundle.crt: |", + "trustedCA": "trustedCA is a reference to a ConfigMap containing a CA certificate bundle. The trustedCA field should only be consumed by a proxy validator. The validator is responsible for reading the certificate bundle from the required key \"ca-bundle.crt\", merging it with the system default trust bundle, and writing the merged trust bundle to a ConfigMap named \"trusted-ca-bundle\" in the \"openshift-config-managed\" namespace. Clients that expect to make proxy connections must use the trusted-ca-bundle for all HTTPS requests to the proxy, and may use the trusted-ca-bundle for non-proxy HTTPS requests as well.\n\nThe namespace for the ConfigMap referenced by trustedCA is \"openshift-config\". 
Here is an example ConfigMap (in yaml):\n\napiVersion: v1 kind: ConfigMap metadata:\n name: user-ca-bundle\n namespace: openshift-config\n data:\n ca-bundle.crt: |", } func (ProxySpec) SwaggerDoc() map[string]string { diff --git a/vendor/github.com/openshift/api/go.mod b/vendor/github.com/openshift/api/go.mod new file mode 100644 index 0000000000000000000000000000000000000000..85dc4c852060ff750b5c0b40003929530490fab9 --- /dev/null +++ b/vendor/github.com/openshift/api/go.mod @@ -0,0 +1,14 @@ +module github.com/openshift/api + +go 1.13 + +require ( + github.com/gogo/protobuf v1.2.2-0.20190723190241-65acae22fc9d + github.com/spf13/pflag v1.0.5 + golang.org/x/net v0.0.0-20191209160850-c0dbc17a3553 // indirect + golang.org/x/tools v0.0.0-20200115044656-831fdb1e1868 // indirect + k8s.io/api v0.17.1 + k8s.io/apimachinery v0.17.1 + k8s.io/code-generator v0.17.1 + k8s.io/klog v1.0.0 +) diff --git a/vendor/github.com/openshift/api/go.sum b/vendor/github.com/openshift/api/go.sum new file mode 100644 index 0000000000000000000000000000000000000000..fa669868609105e8816c3151de642d34ecf91c60 --- /dev/null +++ b/vendor/github.com/openshift/api/go.sum @@ -0,0 +1,183 @@ +github.com/BurntSushi/xgb v0.0.0-20160522181843-27f122750802/go.mod h1:IVnqGOEym/WlBOVXweHU+Q+/VP0lqqI8lqeDx9IjBqo= +github.com/NYTimes/gziphandler v0.0.0-20170623195520-56545f4a5d46/go.mod h1:3wb06e3pkSAbeQ52E9H9iFoQsEEwGN64994WTCIhntQ= +github.com/PuerkitoBio/purell v1.0.0/go.mod h1:c11w/QuzBsJSee3cPx9rAFu61PvFxuPbtSwDGJws/X0= +github.com/PuerkitoBio/purell v1.1.1 h1:WEQqlqaGbrPkxLJWfBwQmfEAE1Z7ONdDLqrN38tNFfI= +github.com/PuerkitoBio/purell v1.1.1/go.mod h1:c11w/QuzBsJSee3cPx9rAFu61PvFxuPbtSwDGJws/X0= +github.com/PuerkitoBio/urlesc v0.0.0-20160726150825-5bd2802263f2/go.mod h1:uGdkoq3SwY9Y+13GIhn11/XLaGBb4BfwItxLd5jeuXE= +github.com/PuerkitoBio/urlesc v0.0.0-20170810143723-de5bf2ad4578 h1:d+Bc7a5rLufV/sSk/8dngufqelfh6jnri85riMAaF/M= +github.com/PuerkitoBio/urlesc v0.0.0-20170810143723-de5bf2ad4578/go.mod h1:uGdkoq3SwY9Y+13GIhn11/XLaGBb4BfwItxLd5jeuXE= +github.com/davecgh/go-spew v0.0.0-20151105211317-5215b55f46b2/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= +github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/docker/spdystream v0.0.0-20160310174837-449fdfce4d96/go.mod h1:Qh8CwZgvJUkLughtfhJv5dyTYa91l1fOUCrgjqmcifM= +github.com/elazarl/goproxy v0.0.0-20170405201442-c4fc26588b6e/go.mod h1:/Zj4wYkgs4iZTTu3o/KG3Itv/qCCa8VVMlb3i9OVuzc= +github.com/emicklei/go-restful v0.0.0-20170410110728-ff4f55a20633/go.mod h1:otzb+WCGbkyDHkqmQmT5YD2WR4BBwUdeQoFo8l/7tVs= +github.com/emicklei/go-restful v2.9.5+incompatible h1:spTtZBk5DYEvbxMVutUuTyh1Ao2r4iyvLdACqsl/Ljk= +github.com/emicklei/go-restful v2.9.5+incompatible/go.mod h1:otzb+WCGbkyDHkqmQmT5YD2WR4BBwUdeQoFo8l/7tVs= +github.com/evanphx/json-patch v4.2.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk= +github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo= +github.com/ghodss/yaml v0.0.0-20150909031657-73d445a93680/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04= +github.com/go-logr/logr v0.1.0/go.mod h1:ixOQHD9gLJUVQQ2ZOR7zLEifBX6tGkNJF4QyIY7sIas= +github.com/go-openapi/jsonpointer v0.0.0-20160704185906-46af16f9f7b1/go.mod h1:+35s3my2LFTysnkMfxsJBAMHj/DoqoB9knIWoYG/Vk0= +github.com/go-openapi/jsonpointer 
v0.19.2/go.mod h1:3akKfEdA7DF1sugOqz1dVQHBcuDBPKZGEoHC/NkiQRg= +github.com/go-openapi/jsonpointer v0.19.3 h1:gihV7YNZK1iK6Tgwwsxo2rJbD1GTbdm72325Bq8FI3w= +github.com/go-openapi/jsonpointer v0.19.3/go.mod h1:Pl9vOtqEWErmShwVjC8pYs9cog34VGT37dQOVbmoatg= +github.com/go-openapi/jsonreference v0.0.0-20160704190145-13c6e3589ad9/go.mod h1:W3Z9FmVs9qj+KR4zFKmDPGiLdk1D9Rlm7cyMvf57TTg= +github.com/go-openapi/jsonreference v0.19.2/go.mod h1:jMjeRr2HHw6nAVajTXJ4eiUwohSTlpa0o73RUL1owJc= +github.com/go-openapi/jsonreference v0.19.3 h1:5cxNfTy0UVC3X8JL5ymxzyoUZmo8iZb+jeTWn7tUa8o= +github.com/go-openapi/jsonreference v0.19.3/go.mod h1:rjx6GuL8TTa9VaixXglHmQmIL98+wF9xc8zWvFonSJ8= +github.com/go-openapi/spec v0.0.0-20160808142527-6aced65f8501/go.mod h1:J8+jY1nAiCcj+friV/PDoE1/3eeccG9LYBs0tYvLOWc= +github.com/go-openapi/spec v0.19.3 h1:0XRyw8kguri6Yw4SxhsQA/atC88yqrk0+G4YhI2wabc= +github.com/go-openapi/spec v0.19.3/go.mod h1:FpwSN1ksY1eteniUU7X0N/BgJ7a4WvBFVA8Lj9mJglo= +github.com/go-openapi/swag v0.0.0-20160704191624-1d0bd113de87/go.mod h1:DXUve3Dpr1UfpPtxFw+EFuQ41HhCWZfha5jSVRG7C7I= +github.com/go-openapi/swag v0.19.2/go.mod h1:POnQmlKehdgb5mhVOsnJFsivZCEZ/vjK9gh66Z9tfKk= +github.com/go-openapi/swag v0.19.5 h1:lTz6Ys4CmqqCQmZPBlbQENR1/GucA2bzYTE12Pw4tFY= +github.com/go-openapi/swag v0.19.5/go.mod h1:POnQmlKehdgb5mhVOsnJFsivZCEZ/vjK9gh66Z9tfKk= +github.com/gogo/protobuf v1.2.2-0.20190723190241-65acae22fc9d h1:3PaI8p3seN09VjbTYC/QWlUZdZ1qS1zGjy7LH2Wt07I= +github.com/gogo/protobuf v1.2.2-0.20190723190241-65acae22fc9d/go.mod h1:SlYgWuQ5SjCEi6WLHjHCa1yvBfUnHcTbrrZtXPKa29o= +github.com/golang/groupcache v0.0.0-20160516000752-02826c3e7903/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= +github.com/golang/protobuf v0.0.0-20161109072736-4bd1920723d7/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= +github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= +github.com/golang/protobuf v1.3.2 h1:6nsPYzhq5kReh6QImI3k5qWzO4PEbvbIW2cwSfR/6xs= +github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= +github.com/google/go-cmp v0.3.0 h1:crn/baboCvb5fXaQ0IJ1SGTsTVrWpDsCWC8EGETZijY= +github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= +github.com/google/gofuzz v0.0.0-20161122191042-44d81051d367/go.mod h1:HP5RmnzzSNb993RKQDq4+1A4ia9nllfqcQFTQJedwGI= +github.com/google/gofuzz v1.0.0 h1:A8PeW59pxE9IoFRqBp37U+mSNaQoZ46F1f0f863XSXw= +github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= +github.com/google/uuid v1.1.1/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= +github.com/googleapis/gnostic v0.0.0-20170729233727-0c5108395e2d/go.mod h1:sJBsCZ4ayReDTBIg8b9dl28c5xFWyhBTVRp3pOg5EKY= +github.com/hashicorp/golang-lru v0.5.1/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= +github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU= +github.com/json-iterator/go v0.0.0-20180612202835-f2b4162afba3/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCVDaaPEHmU= +github.com/json-iterator/go v1.1.8 h1:QiWkFLKq0T7mpzwOTu6BzNDbfTE8OLrYhVKYMLF46Ok= +github.com/json-iterator/go v1.1.8/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= +github.com/kisielk/errcheck v1.2.0/go.mod h1:/BMXB+zMLi60iA8Vv6Ksmxu/1UDYcXs4uQLJ+jE2L00= +github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck= +github.com/kr/pretty v0.1.0 h1:L/CwN0zerZDmRFUapSPitk6f+Q3+0za1rQkzVuMiMFI= +github.com/kr/pretty v0.1.0/go.mod 
h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo= +github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= +github.com/kr/pty v1.1.5/go.mod h1:9r2w37qlBe7rQ6e1fg1S/9xpWHSnaqNdHD3WcMdbPDA= +github.com/kr/text v0.1.0 h1:45sCR5RtlFHMR4UwH9sdQ5TC8v0qDQCHnXt+kaKSTVE= +github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI= +github.com/mailru/easyjson v0.0.0-20160728113105-d5b7844b561a/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc= +github.com/mailru/easyjson v0.0.0-20190614124828-94de47d64c63/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc= +github.com/mailru/easyjson v0.0.0-20190626092158-b2ccc519800e/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc= +github.com/mailru/easyjson v0.7.0 h1:aizVhC/NAAcKWb+5QsU1iNOZb4Yws5UO2I+aIprQITM= +github.com/mailru/easyjson v0.7.0/go.mod h1:KAzv3t3aY1NaHWoQz1+4F1ccyAH66Jk7yos7ldAVICs= +github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= +github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd h1:TRLaZ9cD/w8PVh93nsPXa1VrQ6jlwL5oN8l14QlcNfg= +github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= +github.com/modern-go/reflect2 v0.0.0-20180320133207-05fbef0ca5da/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0= +github.com/modern-go/reflect2 v0.0.0-20180701023420-4b7aa43c6742/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0= +github.com/modern-go/reflect2 v1.0.1 h1:9f412s+6RmYXLWZSEzVVgPGK7C2PphHj5RJrvfx9AWI= +github.com/modern-go/reflect2 v1.0.1/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0= +github.com/munnerz/goautoneg v0.0.0-20120707110453-a547fc61f48d/go.mod h1:+n7T8mK8HuQTcFwEeznm/DIxMOiR9yIdICNftLE1DvQ= +github.com/mxk/go-flowrate v0.0.0-20140419014527-cca7078d478f/go.mod h1:ZdcZmHo+o7JKHSa8/e818NopupXU1YMK5fe1lsApnBw= +github.com/onsi/ginkgo v0.0.0-20170829012221-11459a886d9c/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= +github.com/onsi/ginkgo v1.6.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= +github.com/onsi/ginkgo v1.10.1/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= +github.com/onsi/gomega v0.0.0-20170829124025-dcabb60a477c/go.mod h1:C1qb7wdrVGGVU+Z6iS04AVkA3Q65CEZX59MT0QO5uiA= +github.com/onsi/gomega v1.7.0/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY= +github.com/pmezard/go-difflib v0.0.0-20151028094244-d8ed2627bdf0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= +github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= +github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= +github.com/remyoudompheng/bigfft v0.0.0-20170806203942-52369c62f446/go.mod h1:uYEyJGbgTkfkS4+E/PavXkNJcbFIpEtjt2B0KDQ5+9M= +github.com/spf13/pflag v0.0.0-20170130214245-9ff6c6923cff/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4= +github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA= +github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg= +github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= +github.com/stretchr/objx v0.2.0/go.mod h1:qt09Ya8vawLte6SNmTgCsAVtYtaKzEcn8ATUoHMkEqE= +github.com/stretchr/testify v0.0.0-20151208002404-e3a8ff8ce365/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= +github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= +github.com/stretchr/testify v1.4.0 
h1:2E4SXV/wtOkTonXsotYi4li6zVWxYlZuYNCXe9XRJyk= +github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4= +golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= +golang.org/x/crypto v0.0.0-20190611184440-5c40567a22f8 h1:1wopBVtVdWnn03fZelqdXTqk7U7zPQCb+T4rbU9ZEoU= +golang.org/x/crypto v0.0.0-20190611184440-5c40567a22f8/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= +golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550 h1:ObdrDkeb4kJdCP557AjRjq69pTHfNouLtWZG7j9rPN8= +golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= +golang.org/x/exp v0.0.0-20190125153040-c74c464bbbf2/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= +golang.org/x/exp v0.0.0-20190312203227-4b39c73a6495 h1:I6A9Ag9FpEKOjcKrRNjQkPHawoXIhKyTGfvvjFAiiAk= +golang.org/x/exp v0.0.0-20190312203227-4b39c73a6495/go.mod h1:ZjyILWgesfNpC6sMxTJOJm9Kp84zZh5NQWvqDGG3Qr8= +golang.org/x/image v0.0.0-20190227222117-0694c2d4d067/go.mod h1:kZ7UVZpmo3dzQBMxlp+ypCbDeSB+sBbTgSJuh5dn5js= +golang.org/x/mobile v0.0.0-20190312151609-d3739f865fa6/go.mod h1:z+o9i4GpDbdi3rU15maQ/Ox0txvL9dWGYEHz965HBQE= +golang.org/x/mod v0.1.1-0.20191105210325-c90efee705ee/go.mod h1:QqPTAvyqsEbceGzBzNggFXnrqF1CaUcvgkdR5Ot7KZg= +golang.org/x/net v0.0.0-20170114055629-f2499483f923/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/net v0.0.0-20190613194153-d28f0bde5980/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20190827160401-ba9fcec4b297/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20191004110552-13f9640d40b9 h1:rjwSpXsdiK0dV8/Naq3kAw9ymfAeJIyd0upUIElB+lI= +golang.org/x/net v0.0.0-20191004110552-13f9640d40b9/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20191209160850-c0dbc17a3553 h1:efeOvDhwQ29Dj3SdAV/MJf8oukgn+8D8WgaCaRMchF8= +golang.org/x/net v0.0.0-20191209160850-c0dbc17a3553/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20190423024810-112230192c58 h1:8gQV6CLnAEikrhgkHFbMAEhagSSnXWGV915qUMm9mrU= +golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sys v0.0.0-20170830134202-bb24a47a89ea/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20180909124046-d0be0721c37e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20190312061237-fead79001313/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190616124812-15dcb6c0061f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190826190057-c7b8b68b1456 
h1:ng0gs1AKnRRuEMZoTLLlbOd+C17zUDepwGQBb/n+JVg= +golang.org/x/sys v0.0.0-20190826190057-c7b8b68b1456/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/text v0.0.0-20160726164857-2910a502d2bf/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= +golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= +golang.org/x/text v0.3.2 h1:tW2bmiBqwgJj/UpqtC8EpXEZVYOwU0yG4iWbprSVAcs= +golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk= +golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20181011042414-1f849cf54d09/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20181030221726-6c7e314b6563/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20190206041539-40960b6deb8e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20190312151545-0bb0c0a6e846/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= +golang.org/x/tools v0.0.0-20190614205625-5aca471b1d59/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= +golang.org/x/tools v0.0.0-20190920225731-5eefd052ad72 h1:bw9doJza/SFBEweII/rHQh338oozWyiFsBRHtrflcws= +golang.org/x/tools v0.0.0-20190920225731-5eefd052ad72/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20200115044656-831fdb1e1868 h1:6VZw2h4iwEB4GwgQU3Jvcsm8l9+yReTrErAEK1k6AC4= +golang.org/x/tools v0.0.0-20200115044656-831fdb1e1868/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7 h1:9zdDQZ7Thm29KFXgAX/+yaf3eVbP7djjWp/dXAppNCc= +golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +gonum.org/v1/gonum v0.0.0-20190331200053-3d26580ed485 h1:OB/uP/Puiu5vS5QMRPrXCDWUPb+kt8f1KW8oQzFejQw= +gonum.org/v1/gonum v0.0.0-20190331200053-3d26580ed485/go.mod h1:2ltnJ7xHfj0zHS40VVPYEAAMTa3ZGguvHGBSJeRWqE0= +gonum.org/v1/netlib v0.0.0-20190313105609-8cb42192e0e0/go.mod h1:wa6Ws7BG/ESfp6dHfk7C6KdzKA7wR7u/rKwOGE66zvw= +gonum.org/v1/netlib v0.0.0-20190331212654-76723241ea4e h1:jRyg0XfpwWlhEV8mDfdNGBeSJM2fuyh9Yjrnd8kF2Ts= +gonum.org/v1/netlib v0.0.0-20190331212654-76723241ea4e/go.mod h1:kS+toOQn6AQKjmKJ7gzohV1XkqsFehRA2FbsbkopSuQ= +gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127 h1:qIbj1fsPNlZgppZ+VLlY7N33q108Sa+fhmuc+sWQYwY= +gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/fsnotify.v1 v1.4.7/go.mod h1:Tz8NjZHkW78fSQdbUxIjBTcgA1z1m8ZHf0WmKUhAMys= +gopkg.in/inf.v0 v0.9.1 h1:73M5CoZyi3ZLMOyDlQh031Cx6N9NDJ2Vvfl76EDAgDc= +gopkg.in/inf.v0 v0.9.1/go.mod h1:cWUDdTG/fYaXco+Dcufb5Vnc6Gp2YChqWtbxRZE0mXw= +gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7/go.mod h1:dt/ZhP58zS4L8KSrWDmTeBkI65Dw0HsyUHuEVlX15mw= +gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.2.4 h1:/eiJrUcujPVeJ3xlSWaiNi3uSVmDGBK1pDHUHAnao1I= +gopkg.in/yaml.v2 v2.2.4/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +k8s.io/api v0.17.1 h1:i46MidoDOE9tvQ0TTEYggf3ka/pziP1+tHI/GFVeJao= +k8s.io/api v0.17.1/go.mod 
h1:zxiAc5y8Ngn4fmhWUtSxuUlkfz1ixT7j9wESokELzOg= +k8s.io/apimachinery v0.17.1 h1:zUjS3szTxoUjTDYNvdFkYt2uMEXLcthcbp+7uZvWhYM= +k8s.io/apimachinery v0.17.1/go.mod h1:b9qmWdKlLuU9EBh+06BtLcSf/Mu89rWL33naRxs1uZg= +k8s.io/code-generator v0.17.1 h1:e3B1UqRzRUWygp7WD+QTRT3ZUahPIaRKF0OFa7duQwI= +k8s.io/code-generator v0.17.1/go.mod h1:DVmfPQgxQENqDIzVR2ddLXMH34qeszkKSdH/N+s+38s= +k8s.io/gengo v0.0.0-20190128074634-0689ccc1d7d6/go.mod h1:ezvh/TsK7cY6rbqRK0oQQ8IAqLxYwwyPxAX1Pzy0ii0= +k8s.io/gengo v0.0.0-20190822140433-26a664648505 h1:ZY6yclUKVbZ+SdWnkfY+Je5vrMpKOxmGeKRbsXVmqYM= +k8s.io/gengo v0.0.0-20190822140433-26a664648505/go.mod h1:ezvh/TsK7cY6rbqRK0oQQ8IAqLxYwwyPxAX1Pzy0ii0= +k8s.io/klog v0.0.0-20181102134211-b9b56d5dfc92/go.mod h1:Gq+BEi5rUBO/HRz0bTSXDUcqjScdoY3a9IHpCEIOOfk= +k8s.io/klog v1.0.0 h1:Pt+yjF5aB1xDSVbau4VsWe+dQNzA0qv1LlXdC2dF6Q8= +k8s.io/klog v1.0.0/go.mod h1:4Bi6QPql/J/LkTDqv7R/cd3hPo4k2DG6Ptcz060Ez5I= +k8s.io/kube-openapi v0.0.0-20191107075043-30be4d16710a h1:UcxjrRMyNx/i/y8G7kPvLyy7rfbeuf1PYyBf973pgyU= +k8s.io/kube-openapi v0.0.0-20191107075043-30be4d16710a/go.mod h1:1TqjTSzOxsLGIKfj0lK8EeCP7K1iUG65v09OM0/WG5E= +modernc.org/cc v1.0.0/go.mod h1:1Sk4//wdnYJiUIxnW8ddKpaOJCF37yAdqYnkxUpaYxw= +modernc.org/golex v1.0.0/go.mod h1:b/QX9oBD/LhixY6NDh+IdGv17hgB+51fET1i2kPSmvk= +modernc.org/mathutil v1.0.0/go.mod h1:wU0vUrJsVWBZ4P6e7xtFJEhFSNsfRLJ8H458uRjg03k= +modernc.org/strutil v1.0.0/go.mod h1:lstksw84oURvj9y3tn8lGvRxyRC1S2+g5uuIzNfIOBs= +modernc.org/xc v1.0.0/go.mod h1:mRNCo0bvLjGhHO9WsyuKVU4q0ceiDDDoEeWDJHrNx8I= +sigs.k8s.io/structured-merge-diff v0.0.0-20190525122527-15d366b2352e/go.mod h1:wWxsB5ozmmv/SG7nM11ayaAW51xMvak/t1r0CSlcokI= +sigs.k8s.io/yaml v1.1.0 h1:4A07+ZFc2wgJwo8YNlQpr1rVlgUDlxXHhPJciaPY5gs= +sigs.k8s.io/yaml v1.1.0/go.mod h1:UJmg0vDUVViEyp3mgSv9WPwZCDxu4rQW1olrI1uml+o= diff --git a/vendor/github.com/openshift/api/image/OWNERS b/vendor/github.com/openshift/api/image/OWNERS new file mode 100644 index 0000000000000000000000000000000000000000..c12602811e97d01187036c35279eb6d8969eb2ac --- /dev/null +++ b/vendor/github.com/openshift/api/image/OWNERS @@ -0,0 +1,5 @@ +reviewers: + - bparees + - dmage + - legionus + - miminar diff --git a/vendor/github.com/openshift/api/image/docker10/doc.go b/vendor/github.com/openshift/api/image/docker10/doc.go new file mode 100644 index 0000000000000000000000000000000000000000..cc194d24db2ca5fe375206c0b6c6bc0827d84195 --- /dev/null +++ b/vendor/github.com/openshift/api/image/docker10/doc.go @@ -0,0 +1,4 @@ +// +k8s:deepcopy-gen=package,register + +// Package docker10 is the docker10 version of the API. +package docker10 diff --git a/vendor/github.com/openshift/api/image/docker10/dockertypes.go b/vendor/github.com/openshift/api/image/docker10/dockertypes.go new file mode 100644 index 0000000000000000000000000000000000000000..a985553db407d535e95b649bcae42decde532111 --- /dev/null +++ b/vendor/github.com/openshift/api/image/docker10/dockertypes.go @@ -0,0 +1,56 @@ +package docker10 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// DockerImage is the type representing a container image and its various properties when +// retrieved from the Docker client API. 
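+//
+// Illustrative sketch (not part of the vendored file): because the field
+// tags on this type mirror the JSON keys returned by the Docker client API,
+// a payload can be decoded with the standard library alone, e.g.
+//
+//	var img DockerImage
+//	if err := json.Unmarshal(raw, &img); err != nil {
+//		// raw did not contain valid image metadata
+//	}
+//	fmt.Println(img.ID, img.Architecture)
+//
+// Here `raw` is a hypothetical []byte holding image-inspect JSON; the exact
+// payload shape depends on the Docker API version in use.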
+type DockerImage struct { + metav1.TypeMeta `json:",inline"` + + ID string `json:"Id"` + Parent string `json:"Parent,omitempty"` + Comment string `json:"Comment,omitempty"` + Created metav1.Time `json:"Created,omitempty"` + Container string `json:"Container,omitempty"` + ContainerConfig DockerConfig `json:"ContainerConfig,omitempty"` + DockerVersion string `json:"DockerVersion,omitempty"` + Author string `json:"Author,omitempty"` + Config *DockerConfig `json:"Config,omitempty"` + Architecture string `json:"Architecture,omitempty"` + Size int64 `json:"Size,omitempty"` +} + +// DockerConfig is the list of configuration options used when creating a container. +type DockerConfig struct { + Hostname string `json:"Hostname,omitempty"` + Domainname string `json:"Domainname,omitempty"` + User string `json:"User,omitempty"` + Memory int64 `json:"Memory,omitempty"` + MemorySwap int64 `json:"MemorySwap,omitempty"` + CPUShares int64 `json:"CpuShares,omitempty"` + CPUSet string `json:"Cpuset,omitempty"` + AttachStdin bool `json:"AttachStdin,omitempty"` + AttachStdout bool `json:"AttachStdout,omitempty"` + AttachStderr bool `json:"AttachStderr,omitempty"` + PortSpecs []string `json:"PortSpecs,omitempty"` + ExposedPorts map[string]struct{} `json:"ExposedPorts,omitempty"` + Tty bool `json:"Tty,omitempty"` + OpenStdin bool `json:"OpenStdin,omitempty"` + StdinOnce bool `json:"StdinOnce,omitempty"` + Env []string `json:"Env,omitempty"` + Cmd []string `json:"Cmd,omitempty"` + DNS []string `json:"Dns,omitempty"` // For Docker API v1.9 and below only + Image string `json:"Image,omitempty"` + Volumes map[string]struct{} `json:"Volumes,omitempty"` + VolumesFrom string `json:"VolumesFrom,omitempty"` + WorkingDir string `json:"WorkingDir,omitempty"` + Entrypoint []string `json:"Entrypoint,omitempty"` + NetworkDisabled bool `json:"NetworkDisabled,omitempty"` + SecurityOpts []string `json:"SecurityOpts,omitempty"` + OnBuild []string `json:"OnBuild,omitempty"` + Labels map[string]string `json:"Labels,omitempty"` +} diff --git a/vendor/github.com/openshift/api/image/docker10/register.go b/vendor/github.com/openshift/api/image/docker10/register.go new file mode 100644 index 0000000000000000000000000000000000000000..31d616a06c0c9a42607146fc72ef60bd9d074668 --- /dev/null +++ b/vendor/github.com/openshift/api/image/docker10/register.go @@ -0,0 +1,38 @@ +package docker10 + +import ( + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" +) + +const ( + GroupName = "image.openshift.io" + LegacyGroupName = "" +) + +// SchemeGroupVersion is group version used to register these objects +var ( + SchemeGroupVersion = schema.GroupVersion{Group: GroupName, Version: "1.0"} + LegacySchemeGroupVersion = schema.GroupVersion{Group: LegacyGroupName, Version: "1.0"} + + SchemeBuilder = runtime.NewSchemeBuilder(addKnownTypes) + LegacySchemeBuilder = runtime.NewSchemeBuilder(addLegacyKnownTypes) + + AddToScheme = SchemeBuilder.AddToScheme + AddToSchemeInCoreGroup = LegacySchemeBuilder.AddToScheme +) + +// Adds the list of known types to api.Scheme. 
+func addKnownTypes(scheme *runtime.Scheme) error { + scheme.AddKnownTypes(SchemeGroupVersion, + &DockerImage{}, + ) + return nil +} + +func addLegacyKnownTypes(scheme *runtime.Scheme) error { + scheme.AddKnownTypes(LegacySchemeGroupVersion, + &DockerImage{}, + ) + return nil +} diff --git a/vendor/github.com/openshift/api/image/docker10/zz_generated.deepcopy.go b/vendor/github.com/openshift/api/image/docker10/zz_generated.deepcopy.go new file mode 100644 index 0000000000000000000000000000000000000000..b59f75ac217425904531d5394af489a876d926ef --- /dev/null +++ b/vendor/github.com/openshift/api/image/docker10/zz_generated.deepcopy.go @@ -0,0 +1,113 @@ +// +build !ignore_autogenerated + +// Code generated by deepcopy-gen. DO NOT EDIT. + +package docker10 + +import ( + runtime "k8s.io/apimachinery/pkg/runtime" +) + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DockerConfig) DeepCopyInto(out *DockerConfig) { + *out = *in + if in.PortSpecs != nil { + in, out := &in.PortSpecs, &out.PortSpecs + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.ExposedPorts != nil { + in, out := &in.ExposedPorts, &out.ExposedPorts + *out = make(map[string]struct{}, len(*in)) + for key, val := range *in { + (*out)[key] = val + } + } + if in.Env != nil { + in, out := &in.Env, &out.Env + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.Cmd != nil { + in, out := &in.Cmd, &out.Cmd + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.DNS != nil { + in, out := &in.DNS, &out.DNS + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.Volumes != nil { + in, out := &in.Volumes, &out.Volumes + *out = make(map[string]struct{}, len(*in)) + for key, val := range *in { + (*out)[key] = val + } + } + if in.Entrypoint != nil { + in, out := &in.Entrypoint, &out.Entrypoint + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.SecurityOpts != nil { + in, out := &in.SecurityOpts, &out.SecurityOpts + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.OnBuild != nil { + in, out := &in.OnBuild, &out.OnBuild + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.Labels != nil { + in, out := &in.Labels, &out.Labels + *out = make(map[string]string, len(*in)) + for key, val := range *in { + (*out)[key] = val + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DockerConfig. +func (in *DockerConfig) DeepCopy() *DockerConfig { + if in == nil { + return nil + } + out := new(DockerConfig) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DockerImage) DeepCopyInto(out *DockerImage) { + *out = *in + out.TypeMeta = in.TypeMeta + in.Created.DeepCopyInto(&out.Created) + in.ContainerConfig.DeepCopyInto(&out.ContainerConfig) + if in.Config != nil { + in, out := &in.Config, &out.Config + *out = new(DockerConfig) + (*in).DeepCopyInto(*out) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DockerImage. +func (in *DockerImage) DeepCopy() *DockerImage { + if in == nil { + return nil + } + out := new(DockerImage) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. 
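Reviewer note: every generated deepcopy function in these vendored files follows the same pattern, copy scalars with *out = *in, then re-allocate each map and slice so the copy shares no mutable state with the receiver. Why plain assignment is not enough, in a small hypothetical sketch (the generated DeepCopyObject implementation continues below):

```go
package main

import (
	"fmt"

	"github.com/openshift/api/image/docker10"
)

func main() {
	// Plain struct assignment copies only the map header: a and b share Labels.
	a := docker10.DockerConfig{Labels: map[string]string{"tier": "web"}}
	b := a
	b.Labels["tier"] = "db"
	fmt.Println(a.Labels["tier"]) // "db": the mutation leaked into a

	// DeepCopy re-allocates the map, so the copy is independent.
	c := a.DeepCopy()
	c.Labels["tier"] = "cache"
	fmt.Println(a.Labels["tier"]) // still "db"
}
```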
+func (in *DockerImage) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} diff --git a/vendor/github.com/openshift/api/image/dockerpre012/deepcopy.go b/vendor/github.com/openshift/api/image/dockerpre012/deepcopy.go new file mode 100644 index 0000000000000000000000000000000000000000..ddeb4403c4d9f6ea28cc93b49299a4670c1ad59a --- /dev/null +++ b/vendor/github.com/openshift/api/image/dockerpre012/deepcopy.go @@ -0,0 +1,18 @@ +package dockerpre012 + +// DeepCopyInto is written by hand because ImagePre012 carries a plain time.Time, which deepcopy-gen cannot handle; Created is copied by simple assignment. +func (in *ImagePre012) DeepCopyInto(out *ImagePre012) { + *out = *in + out.Created = in.Created + in.ContainerConfig.DeepCopyInto(&out.ContainerConfig) + if in.Config != nil { + in, out := &in.Config, &out.Config + if *in == nil { + *out = nil + } else { + *out = new(Config) + (*in).DeepCopyInto(*out) + } + } + return +} diff --git a/vendor/github.com/openshift/api/image/dockerpre012/doc.go b/vendor/github.com/openshift/api/image/dockerpre012/doc.go new file mode 100644 index 0000000000000000000000000000000000000000..e4a56260f10b9f9b8939a5e7fe053d8706852e2f --- /dev/null +++ b/vendor/github.com/openshift/api/image/dockerpre012/doc.go @@ -0,0 +1,4 @@ +// +k8s:deepcopy-gen=package,register + +// Package dockerpre012 is the dockerpre012 version of the API. +package dockerpre012 diff --git a/vendor/github.com/openshift/api/image/dockerpre012/dockertypes.go b/vendor/github.com/openshift/api/image/dockerpre012/dockertypes.go new file mode 100644 index 0000000000000000000000000000000000000000..685e0b68c5d9014b41defa32fb3dc135d1d18520 --- /dev/null +++ b/vendor/github.com/openshift/api/image/dockerpre012/dockertypes.go @@ -0,0 +1,136 @@ +package dockerpre012 + +import ( + "time" + + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// DockerImage is for earlier versions of the Docker API (pre-012 to be specific). It is also the +// version of metadata that the container image registry uses to persist metadata. +type DockerImage struct { + metav1.TypeMeta `json:",inline"` + + ID string `json:"id"` + Parent string `json:"parent,omitempty"` + Comment string `json:"comment,omitempty"` + Created metav1.Time `json:"created"` + Container string `json:"container,omitempty"` + ContainerConfig DockerConfig `json:"container_config,omitempty"` + DockerVersion string `json:"docker_version,omitempty"` + Author string `json:"author,omitempty"` + Config *DockerConfig `json:"config,omitempty"` + Architecture string `json:"architecture,omitempty"` + Size int64 `json:"size,omitempty"` +} + +// DockerConfig is the list of configuration options used when creating a container.
+type DockerConfig struct { + Hostname string `json:"Hostname,omitempty"` + Domainname string `json:"Domainname,omitempty"` + User string `json:"User,omitempty"` + Memory int64 `json:"Memory,omitempty"` + MemorySwap int64 `json:"MemorySwap,omitempty"` + CPUShares int64 `json:"CpuShares,omitempty"` + CPUSet string `json:"Cpuset,omitempty"` + AttachStdin bool `json:"AttachStdin,omitempty"` + AttachStdout bool `json:"AttachStdout,omitempty"` + AttachStderr bool `json:"AttachStderr,omitempty"` + PortSpecs []string `json:"PortSpecs,omitempty"` + ExposedPorts map[string]struct{} `json:"ExposedPorts,omitempty"` + Tty bool `json:"Tty,omitempty"` + OpenStdin bool `json:"OpenStdin,omitempty"` + StdinOnce bool `json:"StdinOnce,omitempty"` + Env []string `json:"Env,omitempty"` + Cmd []string `json:"Cmd,omitempty"` + DNS []string `json:"Dns,omitempty"` // For Docker API v1.9 and below only + Image string `json:"Image,omitempty"` + Volumes map[string]struct{} `json:"Volumes,omitempty"` + VolumesFrom string `json:"VolumesFrom,omitempty"` + WorkingDir string `json:"WorkingDir,omitempty"` + Entrypoint []string `json:"Entrypoint,omitempty"` + NetworkDisabled bool `json:"NetworkDisabled,omitempty"` + SecurityOpts []string `json:"SecurityOpts,omitempty"` + OnBuild []string `json:"OnBuild,omitempty"` + // This field is not supported in pre012 and will always be empty. + Labels map[string]string `json:"Labels,omitempty"` +} + +// ImagePre012 serves the same purpose as the Image type except that it is for +// earlier versions of the Docker API (pre-012 to be specific) +// Exists only for legacy conversion, copy of type from fsouza/go-dockerclient +type ImagePre012 struct { + ID string `json:"id"` + Parent string `json:"parent,omitempty"` + Comment string `json:"comment,omitempty"` + Created time.Time `json:"created"` + Container string `json:"container,omitempty"` + ContainerConfig Config `json:"container_config,omitempty"` + DockerVersion string `json:"docker_version,omitempty"` + Author string `json:"author,omitempty"` + Config *Config `json:"config,omitempty"` + Architecture string `json:"architecture,omitempty"` + Size int64 `json:"size,omitempty"` +} + +// Config is the list of configuration options used when creating a container. +// Config does not contain the options that are specific to starting a container on a +// given host. 
Those are contained in HostConfig +// Exists only for legacy conversion, copy of type from fsouza/go-dockerclient +type Config struct { + Hostname string `json:"Hostname,omitempty" yaml:"Hostname,omitempty"` + Domainname string `json:"Domainname,omitempty" yaml:"Domainname,omitempty"` + User string `json:"User,omitempty" yaml:"User,omitempty"` + Memory int64 `json:"Memory,omitempty" yaml:"Memory,omitempty"` + MemorySwap int64 `json:"MemorySwap,omitempty" yaml:"MemorySwap,omitempty"` + MemoryReservation int64 `json:"MemoryReservation,omitempty" yaml:"MemoryReservation,omitempty"` + KernelMemory int64 `json:"KernelMemory,omitempty" yaml:"KernelMemory,omitempty"` + PidsLimit int64 `json:"PidsLimit,omitempty" yaml:"PidsLimit,omitempty"` + CPUShares int64 `json:"CpuShares,omitempty" yaml:"CpuShares,omitempty"` + CPUSet string `json:"Cpuset,omitempty" yaml:"Cpuset,omitempty"` + AttachStdin bool `json:"AttachStdin,omitempty" yaml:"AttachStdin,omitempty"` + AttachStdout bool `json:"AttachStdout,omitempty" yaml:"AttachStdout,omitempty"` + AttachStderr bool `json:"AttachStderr,omitempty" yaml:"AttachStderr,omitempty"` + PortSpecs []string `json:"PortSpecs,omitempty" yaml:"PortSpecs,omitempty"` + ExposedPorts map[Port]struct{} `json:"ExposedPorts,omitempty" yaml:"ExposedPorts,omitempty"` + StopSignal string `json:"StopSignal,omitempty" yaml:"StopSignal,omitempty"` + Tty bool `json:"Tty,omitempty" yaml:"Tty,omitempty"` + OpenStdin bool `json:"OpenStdin,omitempty" yaml:"OpenStdin,omitempty"` + StdinOnce bool `json:"StdinOnce,omitempty" yaml:"StdinOnce,omitempty"` + Env []string `json:"Env,omitempty" yaml:"Env,omitempty"` + Cmd []string `json:"Cmd" yaml:"Cmd"` + DNS []string `json:"Dns,omitempty" yaml:"Dns,omitempty"` // For Docker API v1.9 and below only + Image string `json:"Image,omitempty" yaml:"Image,omitempty"` + Volumes map[string]struct{} `json:"Volumes,omitempty" yaml:"Volumes,omitempty"` + VolumeDriver string `json:"VolumeDriver,omitempty" yaml:"VolumeDriver,omitempty"` + VolumesFrom string `json:"VolumesFrom,omitempty" yaml:"VolumesFrom,omitempty"` + WorkingDir string `json:"WorkingDir,omitempty" yaml:"WorkingDir,omitempty"` + MacAddress string `json:"MacAddress,omitempty" yaml:"MacAddress,omitempty"` + Entrypoint []string `json:"Entrypoint" yaml:"Entrypoint"` + NetworkDisabled bool `json:"NetworkDisabled,omitempty" yaml:"NetworkDisabled,omitempty"` + SecurityOpts []string `json:"SecurityOpts,omitempty" yaml:"SecurityOpts,omitempty"` + OnBuild []string `json:"OnBuild,omitempty" yaml:"OnBuild,omitempty"` + Mounts []Mount `json:"Mounts,omitempty" yaml:"Mounts,omitempty"` + Labels map[string]string `json:"Labels,omitempty" yaml:"Labels,omitempty"` +} + +// Mount represents a mount point in the container. +// +// It has been added in the version 1.20 of the Docker API, available since +// Docker 1.8. +// Exists only for legacy conversion, copy of type from fsouza/go-dockerclient +type Mount struct { + Name string + Source string + Destination string + Driver string + Mode string + RW bool +} + +// Port represents the port number and the protocol, in the form +// <number>/<protocol>. For example: 80/tcp. 
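Reviewer note: since Port packs two facts into one string, consumers typically split it back apart. A hypothetical helper sketch (splitPort and the "tcp" default are assumptions for illustration, not part of this diff; the Port type declaration itself continues just below):

```go
package dockerpre012

import "strings"

// splitPort breaks a Port such as "80/tcp" into its number and protocol
// parts, defaulting the protocol to "tcp" when none is given.
func splitPort(p Port) (number, protocol string) {
	s := string(p)
	if i := strings.IndexByte(s, '/'); i >= 0 {
		return s[:i], s[i+1:]
	}
	return s, "tcp"
}
```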
+// Exists only for legacy conversion, copy of type from fsouza/go-dockerclient +type Port string diff --git a/vendor/github.com/openshift/api/image/dockerpre012/register.go b/vendor/github.com/openshift/api/image/dockerpre012/register.go new file mode 100644 index 0000000000000000000000000000000000000000..469806dbe7447e20c16a7a4fddd7f0561c48a08e --- /dev/null +++ b/vendor/github.com/openshift/api/image/dockerpre012/register.go @@ -0,0 +1,37 @@ +package dockerpre012 + +import ( + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" +) + +const ( + GroupName = "image.openshift.io" + LegacyGroupName = "" +) + +var ( + SchemeGroupVersion = schema.GroupVersion{Group: GroupName, Version: "pre012"} + LegacySchemeGroupVersion = schema.GroupVersion{Group: LegacyGroupName, Version: "pre012"} + + SchemeBuilder = runtime.NewSchemeBuilder(addKnownTypes) + AddToScheme = SchemeBuilder.AddToScheme + + LegacySchemeBuilder = runtime.NewSchemeBuilder(addLegacyKnownTypes) + AddToSchemeInCoreGroup = LegacySchemeBuilder.AddToScheme +) + +// Adds the list of known types to api.Scheme. +func addKnownTypes(scheme *runtime.Scheme) error { + scheme.AddKnownTypes(SchemeGroupVersion, + &DockerImage{}, + ) + return nil +} + +func addLegacyKnownTypes(scheme *runtime.Scheme) error { + scheme.AddKnownTypes(LegacySchemeGroupVersion, + &DockerImage{}, + ) + return nil +} diff --git a/vendor/github.com/openshift/api/image/dockerpre012/zz_generated.deepcopy.go b/vendor/github.com/openshift/api/image/dockerpre012/zz_generated.deepcopy.go new file mode 100644 index 0000000000000000000000000000000000000000..d9042704ad8e0fd2a296da46fdc9f2944b13eb82 --- /dev/null +++ b/vendor/github.com/openshift/api/image/dockerpre012/zz_generated.deepcopy.go @@ -0,0 +1,216 @@ +// +build !ignore_autogenerated + +// Code generated by deepcopy-gen. DO NOT EDIT. + +package dockerpre012 + +import ( + runtime "k8s.io/apimachinery/pkg/runtime" +) + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *Config) DeepCopyInto(out *Config) { + *out = *in + if in.PortSpecs != nil { + in, out := &in.PortSpecs, &out.PortSpecs + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.ExposedPorts != nil { + in, out := &in.ExposedPorts, &out.ExposedPorts + *out = make(map[Port]struct{}, len(*in)) + for key, val := range *in { + (*out)[key] = val + } + } + if in.Env != nil { + in, out := &in.Env, &out.Env + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.Cmd != nil { + in, out := &in.Cmd, &out.Cmd + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.DNS != nil { + in, out := &in.DNS, &out.DNS + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.Volumes != nil { + in, out := &in.Volumes, &out.Volumes + *out = make(map[string]struct{}, len(*in)) + for key, val := range *in { + (*out)[key] = val + } + } + if in.Entrypoint != nil { + in, out := &in.Entrypoint, &out.Entrypoint + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.SecurityOpts != nil { + in, out := &in.SecurityOpts, &out.SecurityOpts + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.OnBuild != nil { + in, out := &in.OnBuild, &out.OnBuild + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.Mounts != nil { + in, out := &in.Mounts, &out.Mounts + *out = make([]Mount, len(*in)) + copy(*out, *in) + } + if in.Labels != nil { + in, out := &in.Labels, &out.Labels + *out = make(map[string]string, len(*in)) + for key, val := range *in { + (*out)[key] = val + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Config. +func (in *Config) DeepCopy() *Config { + if in == nil { + return nil + } + out := new(Config) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DockerConfig) DeepCopyInto(out *DockerConfig) { + *out = *in + if in.PortSpecs != nil { + in, out := &in.PortSpecs, &out.PortSpecs + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.ExposedPorts != nil { + in, out := &in.ExposedPorts, &out.ExposedPorts + *out = make(map[string]struct{}, len(*in)) + for key, val := range *in { + (*out)[key] = val + } + } + if in.Env != nil { + in, out := &in.Env, &out.Env + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.Cmd != nil { + in, out := &in.Cmd, &out.Cmd + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.DNS != nil { + in, out := &in.DNS, &out.DNS + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.Volumes != nil { + in, out := &in.Volumes, &out.Volumes + *out = make(map[string]struct{}, len(*in)) + for key, val := range *in { + (*out)[key] = val + } + } + if in.Entrypoint != nil { + in, out := &in.Entrypoint, &out.Entrypoint + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.SecurityOpts != nil { + in, out := &in.SecurityOpts, &out.SecurityOpts + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.OnBuild != nil { + in, out := &in.OnBuild, &out.OnBuild + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.Labels != nil { + in, out := &in.Labels, &out.Labels + *out = make(map[string]string, len(*in)) + for key, val := range *in { + (*out)[key] = val + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DockerConfig. 
+func (in *DockerConfig) DeepCopy() *DockerConfig { + if in == nil { + return nil + } + out := new(DockerConfig) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DockerImage) DeepCopyInto(out *DockerImage) { + *out = *in + out.TypeMeta = in.TypeMeta + in.Created.DeepCopyInto(&out.Created) + in.ContainerConfig.DeepCopyInto(&out.ContainerConfig) + if in.Config != nil { + in, out := &in.Config, &out.Config + *out = new(DockerConfig) + (*in).DeepCopyInto(*out) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DockerImage. +func (in *DockerImage) DeepCopy() *DockerImage { + if in == nil { + return nil + } + out := new(DockerImage) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *DockerImage) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ImagePre012. +func (in *ImagePre012) DeepCopy() *ImagePre012 { + if in == nil { + return nil + } + out := new(ImagePre012) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *Mount) DeepCopyInto(out *Mount) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Mount. +func (in *Mount) DeepCopy() *Mount { + if in == nil { + return nil + } + out := new(Mount) + in.DeepCopyInto(out) + return out +} diff --git a/vendor/github.com/openshift/api/image/install.go b/vendor/github.com/openshift/api/image/install.go new file mode 100644 index 0000000000000000000000000000000000000000..5b146faa7eefd9cef360d24da552465355ba9f27 --- /dev/null +++ b/vendor/github.com/openshift/api/image/install.go @@ -0,0 +1,26 @@ +package image + +import ( + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" + + imagev1 "github.com/openshift/api/image/v1" +) + +const ( + GroupName = "image.openshift.io" +) + +var ( + schemeBuilder = runtime.NewSchemeBuilder(imagev1.Install) + // Install is a function which adds every version of this group to a scheme + Install = schemeBuilder.AddToScheme +) + +func Resource(resource string) schema.GroupResource { + return schema.GroupResource{Group: GroupName, Resource: resource} +} + +func Kind(kind string) schema.GroupKind { + return schema.GroupKind{Group: GroupName, Kind: kind} +} diff --git a/vendor/github.com/openshift/api/image/v1/consts.go b/vendor/github.com/openshift/api/image/v1/consts.go new file mode 100644 index 0000000000000000000000000000000000000000..11f57a44a3693ef31608c9aadfa9a1207aa2c927 --- /dev/null +++ b/vendor/github.com/openshift/api/image/v1/consts.go @@ -0,0 +1,69 @@ +package v1 + +import corev1 "k8s.io/api/core/v1" + +const ( + // ManagedByOpenShiftAnnotation indicates that an image is managed by OpenShift's registry. + ManagedByOpenShiftAnnotation = "openshift.io/image.managed" + + // DockerImageRepositoryCheckAnnotation indicates that OpenShift has + // attempted to import tag and image information from an external Docker + // image repository. 
+ DockerImageRepositoryCheckAnnotation = "openshift.io/image.dockerRepositoryCheck" + + // InsecureRepositoryAnnotation may be set true on an image stream to allow insecure access to pull content. + InsecureRepositoryAnnotation = "openshift.io/image.insecureRepository" + + // ExcludeImageSecretAnnotation indicates that a secret should not be returned by imagestream/secrets. + ExcludeImageSecretAnnotation = "openshift.io/image.excludeSecret" + + // DockerImageLayersOrderAnnotation describes the order of layers in the docker image. + DockerImageLayersOrderAnnotation = "image.openshift.io/dockerLayersOrder" + + // DockerImageLayersOrderAscending indicates that image layers are sorted in + // the order of their addition (from oldest to newest). + DockerImageLayersOrderAscending = "ascending" + + // DockerImageLayersOrderDescending indicates that layers are sorted in + // reversed order of their addition (from newest to oldest). + DockerImageLayersOrderDescending = "descending" + + // ImporterPreferArchAnnotation represents an architecture that should be + // selected if an image uses a manifest list and it should be + // downconverted. + ImporterPreferArchAnnotation = "importer.image.openshift.io/prefer-arch" + + // ImporterPreferOSAnnotation represents an operating system that should + // be selected if an image uses a manifest list and it should be + // downconverted. + ImporterPreferOSAnnotation = "importer.image.openshift.io/prefer-os" + + // ImageManifestBlobStoredAnnotation indicates that the manifest and config blobs of an image are stored on + // the storage of the integrated Docker registry. + ImageManifestBlobStoredAnnotation = "image.openshift.io/manifestBlobStored" + + // DefaultImageTag is used when an image tag is needed and the configuration does not specify a tag to use. + DefaultImageTag = "latest" + + // ResourceImageStreams represents a number of image streams in a project. + ResourceImageStreams corev1.ResourceName = "openshift.io/imagestreams" + + // ResourceImageStreamImages represents a number of unique references to images in all image stream + // statuses of a project. + ResourceImageStreamImages corev1.ResourceName = "openshift.io/images" + + // ResourceImageStreamTags represents a number of unique references to images in all image stream specs + // of a project. + ResourceImageStreamTags corev1.ResourceName = "openshift.io/image-tags" + + // LimitTypeImage is a limit that applies to images. Used with a max["storage"] LimitRangeItem to set + // the maximum size of an image. + LimitTypeImage corev1.LimitType = "openshift.io/Image" + + // LimitTypeImageStream is a limit that applies to image streams. Used with a max[resource] LimitRangeItem to set the maximum number + // of a resource, where the resource is one of "openshift.io/images" and "openshift.io/image-tags". + LimitTypeImageStream corev1.LimitType = "openshift.io/ImageStream" + + // ImageSignatureTypeAtomicImageV1 is the supported type of image signature. + ImageSignatureTypeAtomicImageV1 string = "AtomicImageV1" +) diff --git a/vendor/github.com/openshift/api/image/v1/doc.go b/vendor/github.com/openshift/api/image/v1/doc.go new file mode 100644 index 0000000000000000000000000000000000000000..e57d45bbf944016ede39c99fc65359f7a7d79869 --- /dev/null +++ b/vendor/github.com/openshift/api/image/v1/doc.go @@ -0,0 +1,8 @@ +// +k8s:deepcopy-gen=package,register +// +k8s:conversion-gen=github.com/openshift/origin/pkg/image/apis/image +// +k8s:defaulter-gen=TypeMeta +// +k8s:openapi-gen=true + +// +groupName=image.openshift.io +// Package v1 is the v1 version of the API.
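Reviewer note: to ground the annotation keys defined above, they are plain entries in an object's metadata.annotations map. A hypothetical consumer check (insecureAllowed is an assumed name for illustration, not an API introduced by this diff):

```go
package main

import (
	"fmt"

	imagev1 "github.com/openshift/api/image/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

// insecureAllowed reports whether an object (for example an ImageStream)
// opts in to insecure registry access via InsecureRepositoryAnnotation.
func insecureAllowed(meta metav1.ObjectMeta) bool {
	return meta.Annotations[imagev1.InsecureRepositoryAnnotation] == "true"
}

func main() {
	meta := metav1.ObjectMeta{Annotations: map[string]string{
		imagev1.InsecureRepositoryAnnotation: "true",
	}}
	fmt.Println(insecureAllowed(meta)) // true
}
```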
+package v1 diff --git a/vendor/github.com/openshift/api/image/v1/generated.pb.go b/vendor/github.com/openshift/api/image/v1/generated.pb.go new file mode 100644 index 0000000000000000000000000000000000000000..a320746faa6ed792c7b5d1b030f8e8d417e8a708 --- /dev/null +++ b/vendor/github.com/openshift/api/image/v1/generated.pb.go @@ -0,0 +1,10916 @@ +// Code generated by protoc-gen-gogo. DO NOT EDIT. +// source: github.com/openshift/api/image/v1/generated.proto + +package v1 + +import ( + fmt "fmt" + + io "io" + + proto "github.com/gogo/protobuf/proto" + github_com_gogo_protobuf_sortkeys "github.com/gogo/protobuf/sortkeys" + k8s_io_api_core_v1 "k8s.io/api/core/v1" + v11 "k8s.io/api/core/v1" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + + math "math" + math_bits "math/bits" + reflect "reflect" + strings "strings" +) + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.GoGoProtoPackageIsVersion2 // please upgrade the proto package + +func (m *DockerImageReference) Reset() { *m = DockerImageReference{} } +func (*DockerImageReference) ProtoMessage() {} +func (*DockerImageReference) Descriptor() ([]byte, []int) { + return fileDescriptor_650a0b34f65fde60, []int{0} +} +func (m *DockerImageReference) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *DockerImageReference) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *DockerImageReference) XXX_Merge(src proto.Message) { + xxx_messageInfo_DockerImageReference.Merge(m, src) +} +func (m *DockerImageReference) XXX_Size() int { + return m.Size() +} +func (m *DockerImageReference) XXX_DiscardUnknown() { + xxx_messageInfo_DockerImageReference.DiscardUnknown(m) +} + +var xxx_messageInfo_DockerImageReference proto.InternalMessageInfo + +func (m *Image) Reset() { *m = Image{} } +func (*Image) ProtoMessage() {} +func (*Image) Descriptor() ([]byte, []int) { + return fileDescriptor_650a0b34f65fde60, []int{1} +} +func (m *Image) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *Image) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *Image) XXX_Merge(src proto.Message) { + xxx_messageInfo_Image.Merge(m, src) +} +func (m *Image) XXX_Size() int { + return m.Size() +} +func (m *Image) XXX_DiscardUnknown() { + xxx_messageInfo_Image.DiscardUnknown(m) +} + +var xxx_messageInfo_Image proto.InternalMessageInfo + +func (m *ImageBlobReferences) Reset() { *m = ImageBlobReferences{} } +func (*ImageBlobReferences) ProtoMessage() {} +func (*ImageBlobReferences) Descriptor() ([]byte, []int) { + return fileDescriptor_650a0b34f65fde60, []int{2} +} +func (m *ImageBlobReferences) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ImageBlobReferences) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *ImageBlobReferences) XXX_Merge(src proto.Message) { + 
xxx_messageInfo_ImageBlobReferences.Merge(m, src) +} +func (m *ImageBlobReferences) XXX_Size() int { + return m.Size() +} +func (m *ImageBlobReferences) XXX_DiscardUnknown() { + xxx_messageInfo_ImageBlobReferences.DiscardUnknown(m) +} + +var xxx_messageInfo_ImageBlobReferences proto.InternalMessageInfo + +func (m *ImageImportSpec) Reset() { *m = ImageImportSpec{} } +func (*ImageImportSpec) ProtoMessage() {} +func (*ImageImportSpec) Descriptor() ([]byte, []int) { + return fileDescriptor_650a0b34f65fde60, []int{3} +} +func (m *ImageImportSpec) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ImageImportSpec) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *ImageImportSpec) XXX_Merge(src proto.Message) { + xxx_messageInfo_ImageImportSpec.Merge(m, src) +} +func (m *ImageImportSpec) XXX_Size() int { + return m.Size() +} +func (m *ImageImportSpec) XXX_DiscardUnknown() { + xxx_messageInfo_ImageImportSpec.DiscardUnknown(m) +} + +var xxx_messageInfo_ImageImportSpec proto.InternalMessageInfo + +func (m *ImageImportStatus) Reset() { *m = ImageImportStatus{} } +func (*ImageImportStatus) ProtoMessage() {} +func (*ImageImportStatus) Descriptor() ([]byte, []int) { + return fileDescriptor_650a0b34f65fde60, []int{4} +} +func (m *ImageImportStatus) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ImageImportStatus) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *ImageImportStatus) XXX_Merge(src proto.Message) { + xxx_messageInfo_ImageImportStatus.Merge(m, src) +} +func (m *ImageImportStatus) XXX_Size() int { + return m.Size() +} +func (m *ImageImportStatus) XXX_DiscardUnknown() { + xxx_messageInfo_ImageImportStatus.DiscardUnknown(m) +} + +var xxx_messageInfo_ImageImportStatus proto.InternalMessageInfo + +func (m *ImageLayer) Reset() { *m = ImageLayer{} } +func (*ImageLayer) ProtoMessage() {} +func (*ImageLayer) Descriptor() ([]byte, []int) { + return fileDescriptor_650a0b34f65fde60, []int{5} +} +func (m *ImageLayer) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ImageLayer) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *ImageLayer) XXX_Merge(src proto.Message) { + xxx_messageInfo_ImageLayer.Merge(m, src) +} +func (m *ImageLayer) XXX_Size() int { + return m.Size() +} +func (m *ImageLayer) XXX_DiscardUnknown() { + xxx_messageInfo_ImageLayer.DiscardUnknown(m) +} + +var xxx_messageInfo_ImageLayer proto.InternalMessageInfo + +func (m *ImageLayerData) Reset() { *m = ImageLayerData{} } +func (*ImageLayerData) ProtoMessage() {} +func (*ImageLayerData) Descriptor() ([]byte, []int) { + return fileDescriptor_650a0b34f65fde60, []int{6} +} +func (m *ImageLayerData) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ImageLayerData) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *ImageLayerData) XXX_Merge(src proto.Message) { + xxx_messageInfo_ImageLayerData.Merge(m, src) +} +func (m *ImageLayerData) XXX_Size() int { + return m.Size() +} +func (m *ImageLayerData) XXX_DiscardUnknown() { + 
xxx_messageInfo_ImageLayerData.DiscardUnknown(m) +} + +var xxx_messageInfo_ImageLayerData proto.InternalMessageInfo + +func (m *ImageList) Reset() { *m = ImageList{} } +func (*ImageList) ProtoMessage() {} +func (*ImageList) Descriptor() ([]byte, []int) { + return fileDescriptor_650a0b34f65fde60, []int{7} +} +func (m *ImageList) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ImageList) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *ImageList) XXX_Merge(src proto.Message) { + xxx_messageInfo_ImageList.Merge(m, src) +} +func (m *ImageList) XXX_Size() int { + return m.Size() +} +func (m *ImageList) XXX_DiscardUnknown() { + xxx_messageInfo_ImageList.DiscardUnknown(m) +} + +var xxx_messageInfo_ImageList proto.InternalMessageInfo + +func (m *ImageLookupPolicy) Reset() { *m = ImageLookupPolicy{} } +func (*ImageLookupPolicy) ProtoMessage() {} +func (*ImageLookupPolicy) Descriptor() ([]byte, []int) { + return fileDescriptor_650a0b34f65fde60, []int{8} +} +func (m *ImageLookupPolicy) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ImageLookupPolicy) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *ImageLookupPolicy) XXX_Merge(src proto.Message) { + xxx_messageInfo_ImageLookupPolicy.Merge(m, src) +} +func (m *ImageLookupPolicy) XXX_Size() int { + return m.Size() +} +func (m *ImageLookupPolicy) XXX_DiscardUnknown() { + xxx_messageInfo_ImageLookupPolicy.DiscardUnknown(m) +} + +var xxx_messageInfo_ImageLookupPolicy proto.InternalMessageInfo + +func (m *ImageSignature) Reset() { *m = ImageSignature{} } +func (*ImageSignature) ProtoMessage() {} +func (*ImageSignature) Descriptor() ([]byte, []int) { + return fileDescriptor_650a0b34f65fde60, []int{9} +} +func (m *ImageSignature) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ImageSignature) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *ImageSignature) XXX_Merge(src proto.Message) { + xxx_messageInfo_ImageSignature.Merge(m, src) +} +func (m *ImageSignature) XXX_Size() int { + return m.Size() +} +func (m *ImageSignature) XXX_DiscardUnknown() { + xxx_messageInfo_ImageSignature.DiscardUnknown(m) +} + +var xxx_messageInfo_ImageSignature proto.InternalMessageInfo + +func (m *ImageStream) Reset() { *m = ImageStream{} } +func (*ImageStream) ProtoMessage() {} +func (*ImageStream) Descriptor() ([]byte, []int) { + return fileDescriptor_650a0b34f65fde60, []int{10} +} +func (m *ImageStream) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ImageStream) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *ImageStream) XXX_Merge(src proto.Message) { + xxx_messageInfo_ImageStream.Merge(m, src) +} +func (m *ImageStream) XXX_Size() int { + return m.Size() +} +func (m *ImageStream) XXX_DiscardUnknown() { + xxx_messageInfo_ImageStream.DiscardUnknown(m) +} + +var xxx_messageInfo_ImageStream proto.InternalMessageInfo + +func (m *ImageStreamImage) Reset() { *m = ImageStreamImage{} } +func (*ImageStreamImage) ProtoMessage() {} +func (*ImageStreamImage) 
Descriptor() ([]byte, []int) { + return fileDescriptor_650a0b34f65fde60, []int{11} +} +func (m *ImageStreamImage) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ImageStreamImage) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *ImageStreamImage) XXX_Merge(src proto.Message) { + xxx_messageInfo_ImageStreamImage.Merge(m, src) +} +func (m *ImageStreamImage) XXX_Size() int { + return m.Size() +} +func (m *ImageStreamImage) XXX_DiscardUnknown() { + xxx_messageInfo_ImageStreamImage.DiscardUnknown(m) +} + +var xxx_messageInfo_ImageStreamImage proto.InternalMessageInfo + +func (m *ImageStreamImport) Reset() { *m = ImageStreamImport{} } +func (*ImageStreamImport) ProtoMessage() {} +func (*ImageStreamImport) Descriptor() ([]byte, []int) { + return fileDescriptor_650a0b34f65fde60, []int{12} +} +func (m *ImageStreamImport) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ImageStreamImport) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *ImageStreamImport) XXX_Merge(src proto.Message) { + xxx_messageInfo_ImageStreamImport.Merge(m, src) +} +func (m *ImageStreamImport) XXX_Size() int { + return m.Size() +} +func (m *ImageStreamImport) XXX_DiscardUnknown() { + xxx_messageInfo_ImageStreamImport.DiscardUnknown(m) +} + +var xxx_messageInfo_ImageStreamImport proto.InternalMessageInfo + +func (m *ImageStreamImportSpec) Reset() { *m = ImageStreamImportSpec{} } +func (*ImageStreamImportSpec) ProtoMessage() {} +func (*ImageStreamImportSpec) Descriptor() ([]byte, []int) { + return fileDescriptor_650a0b34f65fde60, []int{13} +} +func (m *ImageStreamImportSpec) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ImageStreamImportSpec) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *ImageStreamImportSpec) XXX_Merge(src proto.Message) { + xxx_messageInfo_ImageStreamImportSpec.Merge(m, src) +} +func (m *ImageStreamImportSpec) XXX_Size() int { + return m.Size() +} +func (m *ImageStreamImportSpec) XXX_DiscardUnknown() { + xxx_messageInfo_ImageStreamImportSpec.DiscardUnknown(m) +} + +var xxx_messageInfo_ImageStreamImportSpec proto.InternalMessageInfo + +func (m *ImageStreamImportStatus) Reset() { *m = ImageStreamImportStatus{} } +func (*ImageStreamImportStatus) ProtoMessage() {} +func (*ImageStreamImportStatus) Descriptor() ([]byte, []int) { + return fileDescriptor_650a0b34f65fde60, []int{14} +} +func (m *ImageStreamImportStatus) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ImageStreamImportStatus) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *ImageStreamImportStatus) XXX_Merge(src proto.Message) { + xxx_messageInfo_ImageStreamImportStatus.Merge(m, src) +} +func (m *ImageStreamImportStatus) XXX_Size() int { + return m.Size() +} +func (m *ImageStreamImportStatus) XXX_DiscardUnknown() { + xxx_messageInfo_ImageStreamImportStatus.DiscardUnknown(m) +} + +var xxx_messageInfo_ImageStreamImportStatus proto.InternalMessageInfo + +func (m *ImageStreamLayers) Reset() { *m = ImageStreamLayers{} } +func 
(*ImageStreamLayers) ProtoMessage() {} +func (*ImageStreamLayers) Descriptor() ([]byte, []int) { + return fileDescriptor_650a0b34f65fde60, []int{15} +} +func (m *ImageStreamLayers) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ImageStreamLayers) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *ImageStreamLayers) XXX_Merge(src proto.Message) { + xxx_messageInfo_ImageStreamLayers.Merge(m, src) +} +func (m *ImageStreamLayers) XXX_Size() int { + return m.Size() +} +func (m *ImageStreamLayers) XXX_DiscardUnknown() { + xxx_messageInfo_ImageStreamLayers.DiscardUnknown(m) +} + +var xxx_messageInfo_ImageStreamLayers proto.InternalMessageInfo + +func (m *ImageStreamList) Reset() { *m = ImageStreamList{} } +func (*ImageStreamList) ProtoMessage() {} +func (*ImageStreamList) Descriptor() ([]byte, []int) { + return fileDescriptor_650a0b34f65fde60, []int{16} +} +func (m *ImageStreamList) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ImageStreamList) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *ImageStreamList) XXX_Merge(src proto.Message) { + xxx_messageInfo_ImageStreamList.Merge(m, src) +} +func (m *ImageStreamList) XXX_Size() int { + return m.Size() +} +func (m *ImageStreamList) XXX_DiscardUnknown() { + xxx_messageInfo_ImageStreamList.DiscardUnknown(m) +} + +var xxx_messageInfo_ImageStreamList proto.InternalMessageInfo + +func (m *ImageStreamMapping) Reset() { *m = ImageStreamMapping{} } +func (*ImageStreamMapping) ProtoMessage() {} +func (*ImageStreamMapping) Descriptor() ([]byte, []int) { + return fileDescriptor_650a0b34f65fde60, []int{17} +} +func (m *ImageStreamMapping) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ImageStreamMapping) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *ImageStreamMapping) XXX_Merge(src proto.Message) { + xxx_messageInfo_ImageStreamMapping.Merge(m, src) +} +func (m *ImageStreamMapping) XXX_Size() int { + return m.Size() +} +func (m *ImageStreamMapping) XXX_DiscardUnknown() { + xxx_messageInfo_ImageStreamMapping.DiscardUnknown(m) +} + +var xxx_messageInfo_ImageStreamMapping proto.InternalMessageInfo + +func (m *ImageStreamSpec) Reset() { *m = ImageStreamSpec{} } +func (*ImageStreamSpec) ProtoMessage() {} +func (*ImageStreamSpec) Descriptor() ([]byte, []int) { + return fileDescriptor_650a0b34f65fde60, []int{18} +} +func (m *ImageStreamSpec) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ImageStreamSpec) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *ImageStreamSpec) XXX_Merge(src proto.Message) { + xxx_messageInfo_ImageStreamSpec.Merge(m, src) +} +func (m *ImageStreamSpec) XXX_Size() int { + return m.Size() +} +func (m *ImageStreamSpec) XXX_DiscardUnknown() { + xxx_messageInfo_ImageStreamSpec.DiscardUnknown(m) +} + +var xxx_messageInfo_ImageStreamSpec proto.InternalMessageInfo + +func (m *ImageStreamStatus) Reset() { *m = ImageStreamStatus{} } +func (*ImageStreamStatus) ProtoMessage() {} +func (*ImageStreamStatus) Descriptor() 
([]byte, []int) { + return fileDescriptor_650a0b34f65fde60, []int{19} +} +func (m *ImageStreamStatus) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ImageStreamStatus) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *ImageStreamStatus) XXX_Merge(src proto.Message) { + xxx_messageInfo_ImageStreamStatus.Merge(m, src) +} +func (m *ImageStreamStatus) XXX_Size() int { + return m.Size() +} +func (m *ImageStreamStatus) XXX_DiscardUnknown() { + xxx_messageInfo_ImageStreamStatus.DiscardUnknown(m) +} + +var xxx_messageInfo_ImageStreamStatus proto.InternalMessageInfo + +func (m *ImageStreamTag) Reset() { *m = ImageStreamTag{} } +func (*ImageStreamTag) ProtoMessage() {} +func (*ImageStreamTag) Descriptor() ([]byte, []int) { + return fileDescriptor_650a0b34f65fde60, []int{20} +} +func (m *ImageStreamTag) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ImageStreamTag) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *ImageStreamTag) XXX_Merge(src proto.Message) { + xxx_messageInfo_ImageStreamTag.Merge(m, src) +} +func (m *ImageStreamTag) XXX_Size() int { + return m.Size() +} +func (m *ImageStreamTag) XXX_DiscardUnknown() { + xxx_messageInfo_ImageStreamTag.DiscardUnknown(m) +} + +var xxx_messageInfo_ImageStreamTag proto.InternalMessageInfo + +func (m *ImageStreamTagList) Reset() { *m = ImageStreamTagList{} } +func (*ImageStreamTagList) ProtoMessage() {} +func (*ImageStreamTagList) Descriptor() ([]byte, []int) { + return fileDescriptor_650a0b34f65fde60, []int{21} +} +func (m *ImageStreamTagList) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ImageStreamTagList) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *ImageStreamTagList) XXX_Merge(src proto.Message) { + xxx_messageInfo_ImageStreamTagList.Merge(m, src) +} +func (m *ImageStreamTagList) XXX_Size() int { + return m.Size() +} +func (m *ImageStreamTagList) XXX_DiscardUnknown() { + xxx_messageInfo_ImageStreamTagList.DiscardUnknown(m) +} + +var xxx_messageInfo_ImageStreamTagList proto.InternalMessageInfo + +func (m *ImageTag) Reset() { *m = ImageTag{} } +func (*ImageTag) ProtoMessage() {} +func (*ImageTag) Descriptor() ([]byte, []int) { + return fileDescriptor_650a0b34f65fde60, []int{22} +} +func (m *ImageTag) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ImageTag) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *ImageTag) XXX_Merge(src proto.Message) { + xxx_messageInfo_ImageTag.Merge(m, src) +} +func (m *ImageTag) XXX_Size() int { + return m.Size() +} +func (m *ImageTag) XXX_DiscardUnknown() { + xxx_messageInfo_ImageTag.DiscardUnknown(m) +} + +var xxx_messageInfo_ImageTag proto.InternalMessageInfo + +func (m *ImageTagList) Reset() { *m = ImageTagList{} } +func (*ImageTagList) ProtoMessage() {} +func (*ImageTagList) Descriptor() ([]byte, []int) { + return fileDescriptor_650a0b34f65fde60, []int{23} +} +func (m *ImageTagList) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ImageTagList) XXX_Marshal(b 
[]byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *ImageTagList) XXX_Merge(src proto.Message) { + xxx_messageInfo_ImageTagList.Merge(m, src) +} +func (m *ImageTagList) XXX_Size() int { + return m.Size() +} +func (m *ImageTagList) XXX_DiscardUnknown() { + xxx_messageInfo_ImageTagList.DiscardUnknown(m) +} + +var xxx_messageInfo_ImageTagList proto.InternalMessageInfo + +func (m *NamedTagEventList) Reset() { *m = NamedTagEventList{} } +func (*NamedTagEventList) ProtoMessage() {} +func (*NamedTagEventList) Descriptor() ([]byte, []int) { + return fileDescriptor_650a0b34f65fde60, []int{24} +} +func (m *NamedTagEventList) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *NamedTagEventList) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *NamedTagEventList) XXX_Merge(src proto.Message) { + xxx_messageInfo_NamedTagEventList.Merge(m, src) +} +func (m *NamedTagEventList) XXX_Size() int { + return m.Size() +} +func (m *NamedTagEventList) XXX_DiscardUnknown() { + xxx_messageInfo_NamedTagEventList.DiscardUnknown(m) +} + +var xxx_messageInfo_NamedTagEventList proto.InternalMessageInfo + +func (m *RepositoryImportSpec) Reset() { *m = RepositoryImportSpec{} } +func (*RepositoryImportSpec) ProtoMessage() {} +func (*RepositoryImportSpec) Descriptor() ([]byte, []int) { + return fileDescriptor_650a0b34f65fde60, []int{25} +} +func (m *RepositoryImportSpec) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *RepositoryImportSpec) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *RepositoryImportSpec) XXX_Merge(src proto.Message) { + xxx_messageInfo_RepositoryImportSpec.Merge(m, src) +} +func (m *RepositoryImportSpec) XXX_Size() int { + return m.Size() +} +func (m *RepositoryImportSpec) XXX_DiscardUnknown() { + xxx_messageInfo_RepositoryImportSpec.DiscardUnknown(m) +} + +var xxx_messageInfo_RepositoryImportSpec proto.InternalMessageInfo + +func (m *RepositoryImportStatus) Reset() { *m = RepositoryImportStatus{} } +func (*RepositoryImportStatus) ProtoMessage() {} +func (*RepositoryImportStatus) Descriptor() ([]byte, []int) { + return fileDescriptor_650a0b34f65fde60, []int{26} +} +func (m *RepositoryImportStatus) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *RepositoryImportStatus) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *RepositoryImportStatus) XXX_Merge(src proto.Message) { + xxx_messageInfo_RepositoryImportStatus.Merge(m, src) +} +func (m *RepositoryImportStatus) XXX_Size() int { + return m.Size() +} +func (m *RepositoryImportStatus) XXX_DiscardUnknown() { + xxx_messageInfo_RepositoryImportStatus.DiscardUnknown(m) +} + +var xxx_messageInfo_RepositoryImportStatus proto.InternalMessageInfo + +func (m *SignatureCondition) Reset() { *m = SignatureCondition{} } +func (*SignatureCondition) ProtoMessage() {} +func (*SignatureCondition) Descriptor() ([]byte, []int) { + return fileDescriptor_650a0b34f65fde60, []int{27} +} +func (m *SignatureCondition) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m 
*SignatureCondition) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *SignatureCondition) XXX_Merge(src proto.Message) { + xxx_messageInfo_SignatureCondition.Merge(m, src) +} +func (m *SignatureCondition) XXX_Size() int { + return m.Size() +} +func (m *SignatureCondition) XXX_DiscardUnknown() { + xxx_messageInfo_SignatureCondition.DiscardUnknown(m) +} + +var xxx_messageInfo_SignatureCondition proto.InternalMessageInfo + +func (m *SignatureGenericEntity) Reset() { *m = SignatureGenericEntity{} } +func (*SignatureGenericEntity) ProtoMessage() {} +func (*SignatureGenericEntity) Descriptor() ([]byte, []int) { + return fileDescriptor_650a0b34f65fde60, []int{28} +} +func (m *SignatureGenericEntity) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *SignatureGenericEntity) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *SignatureGenericEntity) XXX_Merge(src proto.Message) { + xxx_messageInfo_SignatureGenericEntity.Merge(m, src) +} +func (m *SignatureGenericEntity) XXX_Size() int { + return m.Size() +} +func (m *SignatureGenericEntity) XXX_DiscardUnknown() { + xxx_messageInfo_SignatureGenericEntity.DiscardUnknown(m) +} + +var xxx_messageInfo_SignatureGenericEntity proto.InternalMessageInfo + +func (m *SignatureIssuer) Reset() { *m = SignatureIssuer{} } +func (*SignatureIssuer) ProtoMessage() {} +func (*SignatureIssuer) Descriptor() ([]byte, []int) { + return fileDescriptor_650a0b34f65fde60, []int{29} +} +func (m *SignatureIssuer) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *SignatureIssuer) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *SignatureIssuer) XXX_Merge(src proto.Message) { + xxx_messageInfo_SignatureIssuer.Merge(m, src) +} +func (m *SignatureIssuer) XXX_Size() int { + return m.Size() +} +func (m *SignatureIssuer) XXX_DiscardUnknown() { + xxx_messageInfo_SignatureIssuer.DiscardUnknown(m) +} + +var xxx_messageInfo_SignatureIssuer proto.InternalMessageInfo + +func (m *SignatureSubject) Reset() { *m = SignatureSubject{} } +func (*SignatureSubject) ProtoMessage() {} +func (*SignatureSubject) Descriptor() ([]byte, []int) { + return fileDescriptor_650a0b34f65fde60, []int{30} +} +func (m *SignatureSubject) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *SignatureSubject) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *SignatureSubject) XXX_Merge(src proto.Message) { + xxx_messageInfo_SignatureSubject.Merge(m, src) +} +func (m *SignatureSubject) XXX_Size() int { + return m.Size() +} +func (m *SignatureSubject) XXX_DiscardUnknown() { + xxx_messageInfo_SignatureSubject.DiscardUnknown(m) +} + +var xxx_messageInfo_SignatureSubject proto.InternalMessageInfo + +func (m *TagEvent) Reset() { *m = TagEvent{} } +func (*TagEvent) ProtoMessage() {} +func (*TagEvent) Descriptor() ([]byte, []int) { + return fileDescriptor_650a0b34f65fde60, []int{31} +} +func (m *TagEvent) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *TagEvent) XXX_Marshal(b []byte, deterministic bool) 
([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *TagEvent) XXX_Merge(src proto.Message) { + xxx_messageInfo_TagEvent.Merge(m, src) +} +func (m *TagEvent) XXX_Size() int { + return m.Size() +} +func (m *TagEvent) XXX_DiscardUnknown() { + xxx_messageInfo_TagEvent.DiscardUnknown(m) +} + +var xxx_messageInfo_TagEvent proto.InternalMessageInfo + +func (m *TagEventCondition) Reset() { *m = TagEventCondition{} } +func (*TagEventCondition) ProtoMessage() {} +func (*TagEventCondition) Descriptor() ([]byte, []int) { + return fileDescriptor_650a0b34f65fde60, []int{32} +} +func (m *TagEventCondition) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *TagEventCondition) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *TagEventCondition) XXX_Merge(src proto.Message) { + xxx_messageInfo_TagEventCondition.Merge(m, src) +} +func (m *TagEventCondition) XXX_Size() int { + return m.Size() +} +func (m *TagEventCondition) XXX_DiscardUnknown() { + xxx_messageInfo_TagEventCondition.DiscardUnknown(m) +} + +var xxx_messageInfo_TagEventCondition proto.InternalMessageInfo + +func (m *TagImportPolicy) Reset() { *m = TagImportPolicy{} } +func (*TagImportPolicy) ProtoMessage() {} +func (*TagImportPolicy) Descriptor() ([]byte, []int) { + return fileDescriptor_650a0b34f65fde60, []int{33} +} +func (m *TagImportPolicy) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *TagImportPolicy) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *TagImportPolicy) XXX_Merge(src proto.Message) { + xxx_messageInfo_TagImportPolicy.Merge(m, src) +} +func (m *TagImportPolicy) XXX_Size() int { + return m.Size() +} +func (m *TagImportPolicy) XXX_DiscardUnknown() { + xxx_messageInfo_TagImportPolicy.DiscardUnknown(m) +} + +var xxx_messageInfo_TagImportPolicy proto.InternalMessageInfo + +func (m *TagReference) Reset() { *m = TagReference{} } +func (*TagReference) ProtoMessage() {} +func (*TagReference) Descriptor() ([]byte, []int) { + return fileDescriptor_650a0b34f65fde60, []int{34} +} +func (m *TagReference) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *TagReference) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *TagReference) XXX_Merge(src proto.Message) { + xxx_messageInfo_TagReference.Merge(m, src) +} +func (m *TagReference) XXX_Size() int { + return m.Size() +} +func (m *TagReference) XXX_DiscardUnknown() { + xxx_messageInfo_TagReference.DiscardUnknown(m) +} + +var xxx_messageInfo_TagReference proto.InternalMessageInfo + +func (m *TagReferencePolicy) Reset() { *m = TagReferencePolicy{} } +func (*TagReferencePolicy) ProtoMessage() {} +func (*TagReferencePolicy) Descriptor() ([]byte, []int) { + return fileDescriptor_650a0b34f65fde60, []int{35} +} +func (m *TagReferencePolicy) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *TagReferencePolicy) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *TagReferencePolicy) 
XXX_Merge(src proto.Message) { + xxx_messageInfo_TagReferencePolicy.Merge(m, src) +} +func (m *TagReferencePolicy) XXX_Size() int { + return m.Size() +} +func (m *TagReferencePolicy) XXX_DiscardUnknown() { + xxx_messageInfo_TagReferencePolicy.DiscardUnknown(m) +} + +var xxx_messageInfo_TagReferencePolicy proto.InternalMessageInfo + +func init() { + proto.RegisterType((*DockerImageReference)(nil), "github.com.openshift.api.image.v1.DockerImageReference") + proto.RegisterType((*Image)(nil), "github.com.openshift.api.image.v1.Image") + proto.RegisterType((*ImageBlobReferences)(nil), "github.com.openshift.api.image.v1.ImageBlobReferences") + proto.RegisterType((*ImageImportSpec)(nil), "github.com.openshift.api.image.v1.ImageImportSpec") + proto.RegisterType((*ImageImportStatus)(nil), "github.com.openshift.api.image.v1.ImageImportStatus") + proto.RegisterType((*ImageLayer)(nil), "github.com.openshift.api.image.v1.ImageLayer") + proto.RegisterType((*ImageLayerData)(nil), "github.com.openshift.api.image.v1.ImageLayerData") + proto.RegisterType((*ImageList)(nil), "github.com.openshift.api.image.v1.ImageList") + proto.RegisterType((*ImageLookupPolicy)(nil), "github.com.openshift.api.image.v1.ImageLookupPolicy") + proto.RegisterType((*ImageSignature)(nil), "github.com.openshift.api.image.v1.ImageSignature") + proto.RegisterMapType((map[string]string)(nil), "github.com.openshift.api.image.v1.ImageSignature.SignedClaimsEntry") + proto.RegisterType((*ImageStream)(nil), "github.com.openshift.api.image.v1.ImageStream") + proto.RegisterType((*ImageStreamImage)(nil), "github.com.openshift.api.image.v1.ImageStreamImage") + proto.RegisterType((*ImageStreamImport)(nil), "github.com.openshift.api.image.v1.ImageStreamImport") + proto.RegisterType((*ImageStreamImportSpec)(nil), "github.com.openshift.api.image.v1.ImageStreamImportSpec") + proto.RegisterType((*ImageStreamImportStatus)(nil), "github.com.openshift.api.image.v1.ImageStreamImportStatus") + proto.RegisterType((*ImageStreamLayers)(nil), "github.com.openshift.api.image.v1.ImageStreamLayers") + proto.RegisterMapType((map[string]ImageLayerData)(nil), "github.com.openshift.api.image.v1.ImageStreamLayers.BlobsEntry") + proto.RegisterMapType((map[string]ImageBlobReferences)(nil), "github.com.openshift.api.image.v1.ImageStreamLayers.ImagesEntry") + proto.RegisterType((*ImageStreamList)(nil), "github.com.openshift.api.image.v1.ImageStreamList") + proto.RegisterType((*ImageStreamMapping)(nil), "github.com.openshift.api.image.v1.ImageStreamMapping") + proto.RegisterType((*ImageStreamSpec)(nil), "github.com.openshift.api.image.v1.ImageStreamSpec") + proto.RegisterType((*ImageStreamStatus)(nil), "github.com.openshift.api.image.v1.ImageStreamStatus") + proto.RegisterType((*ImageStreamTag)(nil), "github.com.openshift.api.image.v1.ImageStreamTag") + proto.RegisterType((*ImageStreamTagList)(nil), "github.com.openshift.api.image.v1.ImageStreamTagList") + proto.RegisterType((*ImageTag)(nil), "github.com.openshift.api.image.v1.ImageTag") + proto.RegisterType((*ImageTagList)(nil), "github.com.openshift.api.image.v1.ImageTagList") + proto.RegisterType((*NamedTagEventList)(nil), "github.com.openshift.api.image.v1.NamedTagEventList") + proto.RegisterType((*RepositoryImportSpec)(nil), "github.com.openshift.api.image.v1.RepositoryImportSpec") + proto.RegisterType((*RepositoryImportStatus)(nil), "github.com.openshift.api.image.v1.RepositoryImportStatus") + proto.RegisterType((*SignatureCondition)(nil), "github.com.openshift.api.image.v1.SignatureCondition") + 
+	proto.RegisterType((*SignatureGenericEntity)(nil), "github.com.openshift.api.image.v1.SignatureGenericEntity")
+	proto.RegisterType((*SignatureIssuer)(nil), "github.com.openshift.api.image.v1.SignatureIssuer")
+	proto.RegisterType((*SignatureSubject)(nil), "github.com.openshift.api.image.v1.SignatureSubject")
+	proto.RegisterType((*TagEvent)(nil), "github.com.openshift.api.image.v1.TagEvent")
+	proto.RegisterType((*TagEventCondition)(nil), "github.com.openshift.api.image.v1.TagEventCondition")
+	proto.RegisterType((*TagImportPolicy)(nil), "github.com.openshift.api.image.v1.TagImportPolicy")
+	proto.RegisterType((*TagReference)(nil), "github.com.openshift.api.image.v1.TagReference")
+	proto.RegisterMapType((map[string]string)(nil), "github.com.openshift.api.image.v1.TagReference.AnnotationsEntry")
+	proto.RegisterType((*TagReferencePolicy)(nil), "github.com.openshift.api.image.v1.TagReferencePolicy")
+}
+
+func init() {
+	proto.RegisterFile("github.com/openshift/api/image/v1/generated.proto", fileDescriptor_650a0b34f65fde60)
+}
+
+var fileDescriptor_650a0b34f65fde60 = []byte{
+	// 2502 bytes of a gzipped FileDescriptorProto
+	0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xd4, 0x5a, 0x4d, 0x6c, 0x1c, 0x49,
+	0x15, 0x4e, 0xcf, 0x9f, 0xc7, 0xcf, 0x8e, 0x1d, 0x57, 0xe2, 0xdd, 0xd9, 0x49, 0xd6, 0xf6, 0x76,
+	0x48, 0x14, 0x20, 0x3b, 0x83, 0x4d, 0x76, 0x71, 0x82, 0xc4, 0x6e, 0x26, 0x13, 0xa2, 0x01, 0x7b,
+	0xe3, 0x2d, 0x0f, 0x11, 0x8a, 0x82, 0x44, 0xb9, 0xa7, 0xdc, 0x6e, 0x3c, 0xd3, 0x3d, 0x74, 0xf7,
+	0x78, 0xd7, 0x11, 0x48, 0x1c, 0xd0, 0x6a, 0x0f, 0x1c, 0xe0, 0xc4, 0x61, 0x8f, 0x68, 0x85, 0x38,
+	0x23, 0x10, 0x27, 0x2e, 0x80, 0x14, 0x71, 0x61, 0xb5, 0x5c, 0xf6, 0x82, 0xb5, 0x19, 0x38, 0x73,
+	0xe3, 0xb2, 0x27, 0x54, 0xd5, 0xd5, 0xdd, 0xd5, 0x3d, 0x3d, 0x4e, 0xb7, 0x89, 0x07, 0xb8, 0x4d,
+	0xd7, 0x7b, 0xef, 0x7b, 0xaf, 0xdf, 0xab, 0x7a, 0x3f, 0xd5, 0x03, 0xab, 0xba, 0xe1, 0xee, 0x0d,
+	0x76, 0x6a, 0x9a, 0xd5, 0xab, 0x5b, 0x7d, 0x6a, 0x3a, 0x7b, 0xc6, 0xae, 0x5b, 0x27, 0x7d, 0xa3,
+	0x6e, 0xf4, 0x88, 0x4e, 0xeb, 0x07, 0xab, 0x75, 0x9d, 0x9a, 0xd4, 0x26, 0x2e, 0xed, 0xd4, 0xfa,
+	0xb6, 0xe5, 0x5a, 0xe8, 0x95, 0x50, 0xa4, 0x16, 0x88, 0xd4, 0x48, 0xdf, 0xa8, 0x71, 0x91, 0xda,
+	0xc1, 0x6a, 0xf5, 0x55, 0x09, 0x55, 0xb7, 0x74, 0xab, 0xce, 0x25, 0x77, 0x06, 0xbb, 0xfc, 0x89,
+	0x3f, 0xf0, 0x5f, 0x1e, 0x62, 0x55, 0xdd, 0x5f, 0x77, 0x6a, 0x86, 0xc5, 0xd5, 0x6a, 0x96, 0x9d,
+	0xa4, 0xb5, 0x7a, 0x23, 0xe4, 0xe9, 0x11, 0x6d, 0xcf, 0x30, 0xa9, 0x7d, 0x58, 0xef, 0xef, 0xeb,
+	0x6c, 0xc1, 0xa9, 0xf7, 0xa8, 0x4b, 0x92, 0xa4, 0xea, 0xe3, 0xa4, 0xec, 0x81, 0xe9, 0x1a, 0x3d,
+	0x3a, 0x22, 0xf0, 0xfa, 0xb3, 0x04, 0x1c, 0x6d, 0x8f, 0xf6, 0x48, 0x5c, 0x4e, 0xfd, 0x58, 0x81,
+	0x0b, 0x4d, 0x4b, 0xdb, 0xa7, 0x76, 0x8b, 0x39, 0x01, 0xd3, 0x5d, 0x6a, 0x53, 0x53, 0xa3, 0xe8,
+	0x3a, 0x94, 0x6d, 0xaa, 0x1b, 0x8e, 0x6b, 0x1f, 0x56, 0x94, 0x15, 0xe5, 0xda, 0x74, 0xe3, 0xdc,
+	0x93, 0xa3, 0xe5, 0x33, 0xc3, 0xa3, 0xe5, 0x32, 0x16, 0xeb, 0x38, 0xe0, 0x40, 0x75, 0x98, 0x36,
+	0x49, 0x8f, 0x3a, 0x7d, 0xa2, 0xd1, 0x4a, 0x8e, 0xb3, 0x2f, 0x08, 0xf6, 0xe9, 0xb7, 0x7c, 0x02,
+	0x0e, 0x79, 0xd0, 0x0a, 0x14, 0xd8, 0x43, 0x25, 0xcf, 0x79, 0x67, 0x05, 0x6f, 0x81, 0xf1, 0x62,
+	0x4e, 0x41, 0x2f, 0x43, 0xde, 0x25, 0x7a, 0xa5, 0xc0, 0x19, 0x66, 0x04, 0x43, 0xbe, 0x4d, 0x74,
+	0xcc, 0xd6, 0x51, 0x15, 0x72, 0x46, 0xb3, 0x52, 0xe4, 0x54, 0x10, 0xd4, 0x5c, 0xab, 0x89, 0x73,
+	0x46, 0x53, 0xfd, 0xcb, 0x14, 0x14, 0xf9, 0xeb, 0xa0, 0xef, 0x42, 0x99, 0xb9, 0xb8, 0x43, 0x5c,
+	0xc2, 0xdf, 0x62, 0x66, 0xed, 0x4b, 0x35, 0xcf, 0x53, 0x35, 0xd9, 0x53, 0xb5, 0xfe, 0xbe, 0xce,
+	0x16, 0x9c, 0x1a, 0xe3, 0xae, 0x1d, 0xac, 0xd6, 0xee, 0xef, 0x7c, 0x8f, 0x6a, 0xee, 0x26, 0x75,
+	0x49, 0x03, 0x09, 0x74, 0x08, 0xd7, 0x70, 0x80, 0x8a, 0xb6, 0xe0, 0x42, 0x27, 0xc1, 0x7f, 0xc2,
+	0x09, 0x97, 0x84, 0x6c, 0xa2, 0x8f, 0x71, 0xa2, 0x24, 0xfa, 0x01, 0x9c, 0x97, 0xd6, 0x37, 0x7d,
+	0xf3, 0xf3, 0xdc, 0xfc, 0x57, 0xc7, 0x9a, 0x2f, 0x02, 0x5d, 0xc3, 0xe4, 0x9d, 0xbb, 0xef, 0xba,
+	0xd4, 0x74, 0x0c, 0xcb, 0x6c, 0x5c, 0x14, 0xfa, 0xcf, 0x37, 0x47, 0x11, 0x71, 0x92, 0x1a, 0xb4,
+	0x03, 0xd5, 0x84, 0xe5, 0x07, 0xd4, 0x66, 0x78, 0x22, 0x1a, 0xaa, 0x40, 0xad, 0x36, 0xc7, 0x72,
+	0xe2, 0x63, 0x50, 0xd0, 0x66, 0xf4, 0x0d, 0x89, 0x69, 0xec, 0x52, 0xc7, 0x15, 0xc1, 0x4c, 0x34,
+	0x59, 0xb0, 0xe0, 0x24, 0x39, 0x74, 0x00, 0x0b, 0xd2, 0xf2, 0x06, 0x39, 0xa4, 0xb6, 0x53, 0x29,
+	0xad, 0xe4, 0xb9, 0xbb, 0x9e, 0x79, 0xe8, 0x6b, 0xa1, 0x54, 0xe3, 0x25, 0xa1, 0x7b, 0xa1, 0x19,
+	0xc7, 0xc3, 0xa3, 0x2a, 0x10, 0x05, 0x70, 0x0c, 0xdd, 0x24, 0xee, 0xc0, 0xa6, 0x4e, 0x65, 0x8a,
+	0x2b, 0x5c, 0x4d, 0xab, 0x70, 0xdb, 0x97, 0x0c, 0xf7, 0x57, 0xb0, 0xe4, 0x60, 0x09, 0x18, 0xdd,
+	0x87, 0x45, 0x49, 0x77, 0xc8, 0x54, 0x29, 0xaf, 0xe4, 0xaf, 0xcd, 0x36, 0x5e, 0x1a, 0x1e, 0x2d,
+	0x2f, 0x36, 0x93, 0x18, 0x70, 0xb2, 0x1c, 0xda, 0x83, 0x4b, 0x09, 0x6e, 0xdc, 0xa4, 0x1d, 0x83,
+	0xb4, 0x0f, 0xfb, 0xb4, 0x32, 0xcd, 0xe3, 0xf0, 0x39, 0x61, 0xd6, 0xa5, 0xe6, 0x31, 0xbc, 0xf8,
+	0x58, 0x24, 0x74, 0x2f, 0x12, 0x99, 0x3b, 0x96, 0xb9, 0x6b, 0xe8, 0x15, 0xe0, 0xf0, 0x49, 0xae,
+	0xf6, 0x18, 0xf0, 0xa8, 0x8c, 0xfa, 0x73, 0x05, 0xce, 0xf3, 0xe7, 0x46, 0xd7, 0xda, 0x09, 0x8e,
+	0x8a, 0x83, 0xd6, 0x61, 0x96, 0xbb, 0x75, 0xd3, 0x70, 0x1c, 0xc3, 0xd4, 0xf9, 0x21, 0x29, 0x37,
+	0x2e, 0x08, 0xec, 0xd9, 0x96, 0x44, 0xc3, 0x11, 0x4e, 0xa4, 0x42, 0xa9, 0xeb, 0xed, 0x14, 0x65,
+	0x25, 0xcf, 0x72, 0xc8, 0xf0, 0x68, 0xb9, 0x24, 0x62, 0x2d, 0x28, 0x8c, 0x47, 0xf3, 0x6c, 0xf6,
+	0x4e, 0x33, 0xe7, 0x11, 0x46, 0x0a, 0x8a, 0xfa, 0xa7, 0x3c, 0xcc, 0x73, 0x35, 0xad, 0x5e, 0xdf,
+	0xb2, 0xdd, 0xed, 0x3e, 0xd5, 0xd0, 0x5d, 0x28, 0xec, 0xda, 0x56, 0x4f, 0x64, 0x9c, 0xcb, 0xd2,
+	0x91, 0xad, 0xb1, 0x32, 0x11, 0xe6, 0x97, 0xe0, 0x4d, 0xc2, 0x0c, 0xf8, 0x75, 0xdb, 0xea, 0x61,
+	0x2e, 0x8e, 0xde, 0x84, 0x9c, 0x6b, 0x71, 0xd5, 0x33, 0x6b, 0xd7, 0x92, 0x40, 0x36, 0x2c, 0x8d,
+	0x74, 0xe3, 0x48, 0x25, 0x96, 0x08, 0xdb, 0x16, 0xce, 0xb9, 0x16, 0xea, 0x32, 0xf7, 0x30, 0xb3,
+	0xb6, 0xac, 0xae, 0xa1, 0x1d, 0x8a, 0x1c, 0xb2, 0x96, 0x62, 0x8f, 0xb6, 0x89, 0xde, 0x92, 0x24,
+	0x65, 0x97, 0x86, 0xab, 0x38, 0x82, 0x8e, 0xde, 0x85, 0x79, 0xdb, 0x37, 0x43, 0x28, 0x2c, 0x72,
+	0x85, 0xaf, 0xa5, 0x53, 0x88, 0xa3, 0xc2, 0x8d, 0x17, 0x85, 0xce, 0xf9, 0x18, 0x01, 0xc7, 0xd5,
+	0xa0, 0xdb, 0x30, 0x6f, 0x98, 0x5a, 0x77, 0xd0, 0x09, 0x93, 0x49, 0x81, 0xef, 0x84, 0x00, 0xa2,
+	0x15, 0x25, 0xe3, 0x38, 0xbf, 0xfa, 0x57, 0x05, 0x16, 0xe4, 0x38, 0xba, 0xc4, 0x1d, 0x38, 0xa8,
+	0x0d, 0x25, 0x87, 0xff, 0x12, 0xb1, 0xbc, 0x9e, 0xae, 0x7a, 0x78, 0xd2, 0x8d, 0x39, 0xa1, 0xbd,
+	0xe4, 0x3d, 0x63, 0x81, 0x85, 0x5a, 0x50, 0xe4, 0xef, 0x1d, 0xc4, 0x36, 0x65, 0xce, 0x68, 0x4c,
+	0x0f, 0x8f, 0x96, 0xbd, 0xca, 0x86, 0x3d, 0x04, 0xbf, 0x4a, 0xe6, 0x93, 0xab, 0xa4, 0xfa, 0x9e,
+	0x02, 0x10, 0xa6, 0xac, 0xa0, 0xea, 0x2a, 0x63, 0xab, 0xee, 0x15, 0x28, 0x38, 0xc6, 0x63, 0xcf,
+	0xb2, 0x7c, 0x58, 0xc3, 0xb9, 0xf8, 0xb6, 0xf1, 0x98, 0x62, 0x4e, 0x66, 0xf5, 0xbe, 0x17, 0xe4,
+	0x8b, 0x7c, 0xb4, 0xde, 0x87, 0xc9, 0x21, 0xe4, 0x51, 0x3b, 0x30, 0x17, 0xda, 0xd1, 0x64, 0x85,
+	0xe6, 0x15, 0xa1, 0x49, 0xe1, 0x9a, 0xce, 0x3e, 0x53, 0x4b, 0x2e, 0x85, 0x96, 0xdf, 0x29, 0x30,
+	0xed, 0xa9, 0x31, 0x1c, 0x17, 0x3d, 0x1a, 0x29, 0xfe, 0xb5, 0x74, 0xe1, 0x63, 0xd2, 0xbc, 0xf4,
+	0x07, 0x2d, 0x8f, 0xbf, 0x22, 0x15, 0xfe, 0x4d, 0x28, 0x1a, 0x2e, 0xed, 0x39, 0x95, 0x1c, 0x4f,
+	0xfc, 0xe9, 0x83, 0x78, 0x56, 0x80, 0x16, 0x5b, 0x4c, 0x1c, 0x7b, 0x28, 0xea, 0xba, 0xd8, 0x7e,
+	0x1b, 0x96, 0xb5, 0x3f, 0xe8, 0x8b, 0x7d, 0x7d, 0x19, 0x8a, 0x5d, 0x76, 0xc6, 0x45, 0x5e, 0x0b,
+	0x24, 0xf9, 0xc1, 0xc7, 0x1e, 0x4d, 0xfd, 0x75, 0x49, 0xf8, 0x36, 0x48, 0xf1, 0x13, 0x68, 0x7b,
+	0x56, 0xa0, 0xe0, 0x86, 0x51, 0x09, 0x76, 0x12, 0x0f, 0x08, 0xa7, 0xa0, 0x2b, 0x30, 0xa5, 0x59,
+	0xa6, 0x4b, 0x4d, 0x97, 0x5b, 0x3f, 0xdb, 0x98, 0x19, 0x1e, 0x2d, 0x4f, 0xdd, 0xf1, 0x96, 0xb0,
+	0x4f, 0x43, 0x06, 0x80, 0x66, 0x99, 0x1d, 0xc3, 0x35, 0x2c, 0xd3, 0xa9, 0x14, 0xb8, 0x2f, 0xd3,
+	0xe4, 0x8b, 0xe0, 0x65, 0xef, 0xf8, 0xd2, 0xa1, 0xc5, 0xc1, 0x92, 0x83, 0x25, 0x70, 0xf4, 0x55,
+	0x38, 0xcb, 0xc5, 0x5b, 0x1d, 0x6a, 0xba, 0x86, 0x7b, 0x28, 0x1a, 0x8e, 0x45, 0x21, 0x76, 0xb6,
+	0x25, 0x13, 0x71, 0x94, 0x17, 0xfd, 0x10, 0x66, 0x59, 0x4d, 0xa6, 0x9d, 0x3b, 0x5d, 0x62, 0xf4,
+	0xfc, 0xfe, 0xe2, 0x4e, 0xe6, 0x72, 0xcf, 0x0d, 0xf7, 0x51, 0xee, 0x9a, 0xae, 0x2d, 0xe5, 0x56,
+	0x99, 0x84, 0x23, 0xea, 0xd0, 0xdb, 0x30, 0xa5, 0xd9, 0x94, 0x35, 0xee, 0x95, 0x29, 0x1e, 0xd0,
+	0x2f, 0xa4, 0x0b, 0x68, 0xdb, 0xe8, 0x51, 0xe1, 0x79, 0x4f, 0x1c, 0xfb, 0x38, 0xec, 0x78, 0x18,
+	0x8e, 0x33, 0xa0, 0x9d, 0xc6, 0x61, 0xa5, 0x9c, 0xba, 0x30, 0x04, 0x2f, 0xd2, 0x62, 0xb2, 0x76,
+	0x63, 0x96, 0x1d, 0x8f, 0x96, 0xc0, 0xc1, 0x01, 0x22, 0xfa, 0x8e, 0x8f, 0xde, 0xb6, 0x78, 0x43,
+	0x31, 0xb3, 0xf6, 0xe5, 0x2c, 0xe8, 0xdb, 0x03, 0xbe, 0xeb, 0x64, 0xf8, 0xb6, 0x85, 0x03, 0xc8,
+	0xea, 0x1b, 0xb0, 0x30, 0xe2, 0x48, 0x74, 0x0e, 0xf2, 0xfb, 0x54, 0x8c, 0x2b, 0x98, 0xfd, 0x44,
+	0x17, 0xa0, 0x78, 0x40, 0xba, 0x03, 0xb1, 0x4f, 0xb1, 0xf7, 0x70, 0x2b, 0xb7, 0xae, 0xa8, 0xbf,
+	0xc8, 0xc1, 0x8c, 0x17, 0x19, 0xd7, 0xa6, 0xa4, 0x37, 0x81, 0x23, 0xd3, 0x86, 0x82, 0xd3, 0xa7,
+	0x9a, 0x48, 0xfa, 0x6b, 0xa9, 0x77, 0x0e, 0xb7, 0x8f, 0xf5, 0x15, 0xe1, 0x31, 0x63, 0x4f, 0x98,
+	0xa3, 0xa1, 0x47, 0x41, 0x85, 0xf2, 0x8a, 0xfb, 0x8d, 0x8c, 0xb8, 0xc7, 0x56, 0x2a, 0xf5, 0x0f,
+	0x0a, 0x9c, 0x93, 0xb8, 0x27, 0x35, 0x54, 0x6d, 0x9e, 0xb4, 0x40, 0x86, 0xb9, 0x55, 0x2a, 0x92,
+	0xea, 0x6f, 0x72, 0x22, 0xb9, 0xfa, 0x6f, 0xc1, 0x2a, 0xfc, 0x04, 0x5e, 0xe3, 0x61, 0x24, 0xe2,
+	0xeb, 0xd9, 0x22, 0x13, 0xf6, 0x93, 0x89, 0x71, 0xdf, 0x89, 0xc5, 0xfd, 0xd6, 0x89, 0xd0, 0x8f,
+	0x8f, 0xfe, 0x8f, 0x73, 0xb0, 0x98, 0x68, 0x11, 0xba, 0x0a, 0x25, 0xaf, 0xf5, 0xe3, 0x9e, 0x2b,
+	0x87, 0x08, 0x1e, 0x0f, 0x16, 0x54, 0xa4, 0x03, 0xd8, 0xb4, 0x6f, 0x39, 0x86, 0x6b, 0xd9, 0x87,
+	0xc2, 0x0f, 0x5f, 0x49, 0x61, 0x29, 0x0e, 0x84, 0x24, 0x37, 0xcc, 0x31, 0x47, 0x87, 0x14, 0x2c,
+	0x41, 0xa3, 0x87, 0xcc, 0x20, 0xa2, 0x53, 0xe6, 0x8e, 0x7c, 0x96, 0xe3, 0x25, 0xe3, 0x87, 0x2f,
+	0xc1, 0x90, 0xb0, 0x40, 0x54, 0x7f, 0x9b, 0x83, 0x17, 0xc7, 0xb8, 0x0e, 0xe1, 0x88, 0x23, 0x58,
+	0x87, 0x91, 0x29, 0x0c, 0xde, 0x48, 0x11, 0x73, 0x9a, 0x91, 0xe0, 0xb4, 0x9b, 0x27, 0x71, 0x9a,
+	0x88, 0xee, 0x31, 0x6e, 0x7b, 0x14, 0x73, 0xdb, 0x8d, 0x8c, 0x6e, 0x8b, 0xed, 0x9f, 0x98, 0xe3,
+	0x3e, 0x2c, 0x44, 0xce, 0x9d, 0x18, 0x9b, 0x4f, 0xff, 0xdc, 0x75, 0xa0, 0xb8, 0xd3, 0xb5, 0x76,
+	0xfc, 0xd6, 0xec, 0x8d, 0x6c, 0x31, 0xf1, 0xcc, 0xac, 0xb1, 0x49, 0x53, 0x14, 0xe8, 0x20, 0xab,
+	0xf0, 0x35, 0xec, 0x81, 0xa3, 0xbd, 0x98, 0xef, 0xde, 0x3c, 0x91, 0x1a, 0xcf, 0x65, 0x9e, 0x9e,
+	0x31, 0x7e, 0xac, 0xee, 0x03, 0x84, 0xd6, 0x24, 0x54, 0xb9, 0x7b, 0x72, 0x95, 0xcb, 0x70, 0x07,
+	0x11, 0x34, 0xe3, 0x52, 0x61, 0xac, 0x7e, 0x5f, 0xd4, 0xc5, 0xb1, 0xda, 0x36, 0xa2, 0xda, 0x5e,
+	0x4f, 0x9d, 0x9c, 0x23, 0xa3, 0xbb, 0x5c, 0x8b, 0xff, 0xa8, 0x88, 0x19, 0x5a, 0x78, 0xe6, 0xf4,
+	0x9b, 0xf7, 0xed, 0x68, 0xf3, 0x9e, 0xf5, 0xd4, 0x26, 0xb7, 0xf0, 0xff, 0x50, 0x00, 0x49, 0x5c,
+	0x9b, 0xa4, 0xdf, 0x37, 0x4c, 0xfd, 0xff, 0xae, 0x5c, 0x3e, 0x6b, 0xa6, 0xfc, 0x55, 0x2e, 0x12,
+	0x2d, 0x5e, 0x0f, 0x4c, 0x98, 0xed, 0x4a, 0x83, 0x4b, 0xd6, 0x5e, 0x44, 0x1e, 0x7a, 0xc2, 0x76,
+	0x58, 0x5e, 0xc5, 0x11, 0x7c, 0xb4, 0x1d, 0xb9, 0x13, 0x0b, 0x93, 0x9b, 0x98, 0x6c, 0x5f, 0x16,
+	0x10, 0x8b, 0xcd, 0x24, 0x26, 0x9c, 0x2c, 0x8b, 0xde, 0x86, 0x82, 0x4b, 0x74, 0x7f, 0x4f, 0xd4,
+	0x33, 0x5e, 0x5a, 0x48, 0x43, 0x10, 0xd1, 0x1d, 0xcc, 0xa1, 0xd4, 0x5f, 0x46, 0x3b, 0x0f, 0x51,
+	0x34, 0x4e, 0xc5, 0x7a, 0x0a, 0x17, 0xfb, 0x83, 0x9d, 0xae, 0xa1, 0x25, 0x4a, 0x89, 0x68, 0x5e,
+	0x16, 0xd0, 0x17, 0xb7, 0xc6, 0xb3, 0xe2, 0xe3, 0x70, 0xd0, 0x83, 0x88, 0x93, 0xd2, 0x44, 0xf8,
+	0x2d, 0xd2, 0xa3, 0x9d, 0x36, 0xd1, 0xef, 0x1e, 0x50, 0xd3, 0x65, 0x67, 0x31, 0xd1, 0x53, 0x1f,
+	0x14, 0xfc, 0x29, 0x96, 0x7b, 0xaa, 0x4d, 0x26, 0x71, 0x70, 0xbe, 0xe1, 0xed, 0x74, 0xef, 0xd8,
+	0x64, 0x0e, 0xf8, 0x54, 0xe4, 0x83, 0xc4, 0x1a, 0x80, 0xf8, 0xb8, 0x62, 0x58, 0x26, 0x77, 0x77,
+	0x3e, 0xd4, 0x7e, 0x2f, 0xa0, 0x60, 0x89, 0x6b, 0xe4, 0xd8, 0x94, 0x4e, 0xf9, 0xd8, 0xec, 0x25,
+	0x0c, 0xdb, 0x37, 0xd2, 0xbd, 0x36, 0x8f, 0x5e, 0xfa, 0x59, 0x3b, 0x48, 0x49, 0xc5, 0xe7, 0xd2,
+	0xc1, 0xff, 0x39, 0x9a, 0x5a, 0xdb, 0x44, 0x9f, 0x40, 0x91, 0x78, 0x10, 0x2d, 0x12, 0xab, 0xd9,
+	0x8a, 0x44, 0x9b, 0xe8, 0x63, 0xea, 0xc4, 0xa7, 0x39, 0x28, 0x73, 0xc6, 0xc9, 0x6c, 0xf2, 0xcd,
+	0xc8, 0x14, 0x92, 0x79, 0x97, 0x97, 0x63, 0x83, 0xc7, 0xb7, 0x4f, 0x30, 0x70, 0x8e, 0xa6, 0x00,
+	0x38, 0xee, 0x5a, 0xb4, 0xf0, 0x9f, 0x5e, 0x8b, 0xaa, 0xbf, 0x57, 0x60, 0xd6, 0x77, 0xf1, 0x04,
+	0x76, 0xca, 0x56, 0x74, 0xa7, 0x7c, 0x31, 0xad, 0xe5, 0xe3, 0xf7, 0xc8, 0x3f, 0x15, 0x58, 0x18,
+	0xf1, 0x9a, 0x5f, 0x99, 0x95, 0x31, 0xdf, 0x44, 0x4f, 0x60, 0x86, 0x0f, 0x9f, 0x6c, 0x46, 0x2c,
+	0x61, 0xe4, 0x4f, 0x2f, 0x61, 0xa8, 0xef, 0xe7, 0xe1, 0x42, 0xd2, 0xd4, 0xf7, 0xbc, 0x3e, 0xa6,
+	0xc4, 0x3f, 0x85, 0xe4, 0x26, 0xfd, 0x29, 0xa4, 0xf0, 0x5f, 0xfb, 0x14, 0x92, 0xcf, 0xf8, 0x29,
+	0xe4, 0xfd, 0x1c, 0xbc, 0x90, 0x3c, 0x4b, 0x9e, 0xd2, 0xf7, 0x90, 0x70, 0x0a, 0xcd, 0x3d, 0xff,
+	0x29, 0x14, 0xdd, 0x82, 0x39, 0xd2, 0xf1, 0xb6, 0x19, 0xe9, 0xb2, 0x8e, 0x83, 0xef, 0xe3, 0xe9,
+	0x06, 0x1a, 0x1e, 0x2d, 0xcf, 0xdd, 0x8e, 0x50, 0x70, 0x8c, 0x53, 0xfd, 0x38, 0x0f, 0x68, 0xf4,
+	0xa6, 0x19, 0xdd, 0x12, 0xb7, 0xdf, 0xde, 0x41, 0xbc, 0x2a, 0xdf, 0x7e, 0x7f, 0x76, 0xb4, 0xfc,
+	0xc2, 0xa8, 0x84, 0x74, 0x2f, 0xbe, 0x11, 0xb8, 0xd0, 0xbb, 0x3b, 0xbf, 0x11, 0x75, 0xca, 0x67,
+	0x47, 0xcb, 0x09, 0x7f, 0x2b, 0xa9, 0x05, 0x48, 0x31, 0xd7, 0xe9, 0x70, 0xb6, 0x4b, 0x1c, 0x77,
+	0xcb, 0xb6, 0x76, 0x68, 0xdb, 0x10, 0x7f, 0xa8, 0xc8, 0x76, 0x3b, 0x1c, 0xdc, 0x7f, 0x6f, 0xc8,
+	0x40, 0x38, 0x8a, 0x8b, 0x0e, 0x00, 0xb1, 0x85, 0xb6, 0x4d, 0x4c, 0xc7, 0x7b, 0x25, 0xa6, 0xad,
+	0x90, 0x59, 0x5b, 0x55, 0x68, 0x43, 0x1b, 0x23, 0x68, 0x38, 0x41, 0x03, 0xba, 0x0a, 0x25, 0x9b,
+	0x12, 0xc7, 0x32, 0xc5, 0x6d, 0x7d, 0x10, 0x65, 0xcc, 0x57, 0xb1, 0xa0, 0xa2, 0xcf, 0xc3, 0x54,
+	0x8f, 0x3a, 0x0e, 0x2b, 0x1f, 0x25, 0xce, 0x38, 0x2f, 0x18, 0xa7, 0x36, 0xbd, 0x65, 0xec, 0xd3,
+	0xd5, 0xf7, 0x14, 0x08, 0x43, 0xc4, 0x3b, 0x33, 0x43, 0xbb, 0xeb, 0xdd, 0xf2, 0xaf, 0xc3, 0xac,
+	0x65, 0xeb, 0xc4, 0x34, 0x1e, 0x7b, 0x6d, 0x9c, 0x17, 0xe0, 0xe0, 0xc4, 0xdf, 0x97, 0x68, 0x38,
+	0xc2, 0xc9, 0xda, 0x3f, 0xcd, 0xea, 0xf5, 0x2c, 0x93, 0x65, 0x6d, 0x11, 0x5a, 0x29, 0xe7, 0xf9,
+	0x14, 0x2c, 0x71, 0xa9, 0x1f, 0x2a, 0x30, 0x1f, 0xbb, 0x4f, 0x47, 0x3f, 0x53, 0xe0, 0x05, 0x27,
+	0xd1, 0x38, 0x71, 0xe4, 0x6e, 0x66, 0xb9, 0x46, 0x8f, 0x00, 0x34, 0x96, 0x84, 0x3d, 0x63, 0xde,
+	0x1e, 0x8f, 0x51, 0xac, 0xfe, 0x4d, 0x81, 0x73, 0xf1, 0x9b, 0xf9, 0xff, 0x45, 0x43, 0xd1, 0x6b,
+	0x30, 0xe3, 0xcd, 0x2e, 0xdf, 0xa4, 0x87, 0xad, 0xa6, 0x88, 0xc2, 0x79, 0x01, 0x36, 0xb3, 0x15,
+	0x92, 0xb0, 0xcc, 0xa7, 0xfe, 0x24, 0x07, 0x65, 0xbf, 0x62, 0xa1, 0x6f, 0x85, 0x5f, 0x5a, 0x94,
+	0xcc, 0xbb, 0x3b, 0xd8, 0x74, 0x23, 0x5f, 0x5b, 0x9e, 0xff, 0xff, 0x84, 0x2e, 0xfb, 0xed, 0x92,
+	0x37, 0xda, 0x25, 0xcf, 0xf2, 0xd1, 0xa9, 0xa4, 0x90, 0x66, 0x2a, 0x51, 0x3f, 0xc8, 0xc3, 0xc2,
+	0x48, 0x01, 0x47, 0x37, 0x23, 0x39, 0xef, 0x4a, 0x2c, 0xe7, 0x2d, 0x8e, 0x08, 0x9c, 0x5a, 0xca,
+	0x4b, 0xce, 0x44, 0xf9, 0x09, 0x66, 0xa2, 0x42, 0xda, 0x4c, 0x54, 0x3c, 0x3e, 0x13, 0xc5, 0xa2,
+	0x53, 0x4a, 0x15, 0x9d, 0x3e, 0xcc, 0xc7, 0x3a, 0x12, 0x74, 0x1d, 0xca, 0x86, 0xe9, 0x50, 0x6d,
+	0x60, 0x53, 0x71, 0x1f, 0x1f, 0x34, 0xab, 0x2d, 0xb1, 0x8e, 0x03, 0x0e, 0x54, 0x87, 0x69, 0x47,
+	0xdb, 0xa3, 0x9d, 0x41, 0x97, 0x76, 0x78, 0x40, 0xca, 0xe1, 0x57, 0xf5, 0x6d, 0x9f, 0x80, 0x43,
+	0x1e, 0xf5, 0x5f, 0x05, 0x98, 0x95, 0x7b, 0x92, 0x14, 0x7f, 0x23, 0x78, 0x07, 0x66, 0x88, 0x69,
+	0x5a, 0x2e, 0xf1, 0x1a, 0xc7, 0x5c, 0xea, 0x0b, 0x52, 0x59, 0x4f, 0xed, 0x76, 0x08, 0xe1, 0x5d,
+	0x90, 0x06, 0x47, 0x59, 0xa2, 0x60, 0x59, 0x13, 0xba, 0x2d, 0xba, 0xc5, 0x7c, 0xfa, 0x6e, 0xb1,
+	0x1c, 0xeb, 0x14, 0xeb, 0x30, 0x1d, 0x34, 0x55, 0xe2, 0x6f, 0x24, 0x81, 0x7f, 0xc2, 0x33, 0x19,
+	0xf2, 0xa0, 0x5a, 0x24, 0x8a, 0x45, 0x1e, 0xc5, 0xb9, 0x63, 0xa6, 0xfe, 0x78, 0x2b, 0x5a, 0x9a,
+	0x74, 0x2b, 0x3a, 0x35, 0x91, 0x56, 0xb4, 0xfa, 0x35, 0x38, 0x17, 0x8f, 0x60, 0xa6, 0x4f, 0xb4,
+	0x5b, 0x80, 0x46, 0xf5, 0x3f, 0xab, 0xf7, 0x1a, 0x95, 0x08, 0x13, 0x51, 0xe3, 0xda, 0x93, 0xa7,
+	0x4b, 0x67, 0x3e, 0x7a, 0xba, 0x74, 0xe6, 0x93, 0xa7, 0x4b, 0x67, 0x7e, 0x34, 0x5c, 0x52, 0x9e,
+	0x0c, 0x97, 0x94, 0x8f, 0x86, 0x4b, 0xca, 0x27, 0xc3, 0x25, 0xe5, 0xd3, 0xe1, 0x92, 0xf2, 0xd3,
+	0xbf, 0x2f, 0x9d, 0x79, 0x98, 0x3b, 0x58, 0xfd, 0x77, 0x00, 0x00, 0x00, 0xff, 0xff, 0x39, 0xdb,
+	0xfe, 0xb3, 0x60, 0x2c, 0x00, 0x00,
+}
+
+func (m *DockerImageReference) Marshal() (dAtA []byte, err error) {
+	size := m.Size()
+	dAtA = make([]byte, size)
+	n, err := m.MarshalToSizedBuffer(dAtA[:size])
+	if err != nil {
+		return nil, err
+	}
+	return dAtA[:n], nil
+}
+
+func (m *DockerImageReference) MarshalTo(dAtA []byte) (int, error) {
+	size := m.Size()
+	return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *DockerImageReference) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+	i := len(dAtA)
+	_ = i
+	var l int
+	_ = l
+	i -= len(m.ID)
+	copy(dAtA[i:], m.ID)
+	i = encodeVarintGenerated(dAtA, i, uint64(len(m.ID)))
+	i--
+	dAtA[i] = 0x2a
+	i -= len(m.Tag)
+	copy(dAtA[i:], m.Tag)
+	i = encodeVarintGenerated(dAtA, i, uint64(len(m.Tag)))
+	i--
+	dAtA[i] = 0x22
+	i -= len(m.Name)
+	copy(dAtA[i:], m.Name)
+	i = encodeVarintGenerated(dAtA, i, uint64(len(m.Name)))
+	i--
+	dAtA[i] = 0x1a
+	i -= len(m.Namespace)
+	copy(dAtA[i:], m.Namespace)
+	i = encodeVarintGenerated(dAtA, i, uint64(len(m.Namespace)))
+	i--
+	dAtA[i] = 0x12
+	i -= len(m.Registry)
+	copy(dAtA[i:], m.Registry)
+	i = encodeVarintGenerated(dAtA, i, uint64(len(m.Registry)))
+	i--
+	dAtA[i] = 0xa
+	return len(dAtA) - i, nil
+}
+
+func (m *Image) Marshal() (dAtA []byte, err error) {
+	size := m.Size()
+	dAtA = make([]byte, size)
+	n, err := m.MarshalToSizedBuffer(dAtA[:size])
+	if err != nil {
+		return nil, err
+	}
+	return dAtA[:n], nil
+}
+
+func (m *Image) MarshalTo(dAtA []byte) (int, error) {
+	size := m.Size()
+	return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *Image) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+	i := len(dAtA)
+	_ = i
+	var l int
+	_ = l
+	i -= len(m.DockerImageConfig)
+	copy(dAtA[i:], m.DockerImageConfig)
+	i = encodeVarintGenerated(dAtA, i, uint64(len(m.DockerImageConfig)))
+	i--
+	dAtA[i] = 0x52
+	i -= len(m.DockerImageManifestMediaType)
+	copy(dAtA[i:], m.DockerImageManifestMediaType)
+	i = encodeVarintGenerated(dAtA, i, uint64(len(m.DockerImageManifestMediaType)))
+	i--
+	dAtA[i] = 0x4a
+	if len(m.DockerImageSignatures) > 0 {
+		for iNdEx := len(m.DockerImageSignatures) - 1; iNdEx >= 0; iNdEx-- {
+			i -= len(m.DockerImageSignatures[iNdEx])
+			copy(dAtA[i:], m.DockerImageSignatures[iNdEx])
+			i = encodeVarintGenerated(dAtA, i, uint64(len(m.DockerImageSignatures[iNdEx])))
+			i--
+			dAtA[i] = 0x42
+		}
+	}
+	if len(m.Signatures) > 0 {
+		for iNdEx := len(m.Signatures) - 1; iNdEx >= 0; iNdEx-- {
+			{
+				size, err := m.Signatures[iNdEx].MarshalToSizedBuffer(dAtA[:i])
+				if err != nil {
+					return 0, err
+				}
+				i -= size
+				i = encodeVarintGenerated(dAtA, i, uint64(size))
+			}
+			i--
+			dAtA[i] = 0x3a
+		}
+	}
+	if len(m.DockerImageLayers) > 0 {
+		for iNdEx := len(m.DockerImageLayers) - 1; iNdEx >= 0; iNdEx-- {
+			{
+				size, err := m.DockerImageLayers[iNdEx].MarshalToSizedBuffer(dAtA[:i])
+				if err != nil {
+					return 0, err
+				}
+				i -= size
+				i = encodeVarintGenerated(dAtA, i, uint64(size))
+			}
+			i--
+			dAtA[i] = 0x32
+		}
+	}
+	i -= len(m.DockerImageManifest)
+	copy(dAtA[i:], m.DockerImageManifest)
+	i = encodeVarintGenerated(dAtA, i, uint64(len(m.DockerImageManifest)))
+	i--
+	dAtA[i] = 0x2a
+	i -= len(m.DockerImageMetadataVersion)
+	copy(dAtA[i:], m.DockerImageMetadataVersion)
+	i = encodeVarintGenerated(dAtA, i, uint64(len(m.DockerImageMetadataVersion)))
+	i--
+	dAtA[i] = 0x22
+	{
+		size, err := m.DockerImageMetadata.MarshalToSizedBuffer(dAtA[:i])
+		if err != nil {
+			return 0, err
+		}
+		i -= size
+		i = encodeVarintGenerated(dAtA, i, uint64(size))
+	}
+	i--
+	dAtA[i] = 0x1a
+	i -= len(m.DockerImageReference)
+	copy(dAtA[i:], m.DockerImageReference)
+	i = encodeVarintGenerated(dAtA, i, uint64(len(m.DockerImageReference)))
+	i--
+	dAtA[i] = 0x12
+	{
+		size, err := m.ObjectMeta.MarshalToSizedBuffer(dAtA[:i])
+		if err != nil {
+			return 0, err
+		}
+		i -= size
+		i = encodeVarintGenerated(dAtA, i, uint64(size))
+	}
+	i--
+	dAtA[i] = 0xa
+	return len(dAtA) - i, nil
+}
+
+func (m *ImageBlobReferences) Marshal() (dAtA []byte, err error) {
+	size := m.Size()
+	dAtA = make([]byte, size)
+	n, err := m.MarshalToSizedBuffer(dAtA[:size])
+	if err != nil {
+		return nil, err
+	}
+	return dAtA[:n], nil
+}
+
+func (m *ImageBlobReferences) MarshalTo(dAtA []byte) (int, error) {
+	size := m.Size()
+	return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *ImageBlobReferences) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+	i := len(dAtA)
+	_ = i
+	var l int
+	_ = l
+	i--
+	if m.ImageMissing {
+		dAtA[i] = 1
+	} else {
+		dAtA[i] = 0
+	}
+	i--
+	dAtA[i] = 0x18
+	if m.Config != nil {
+		i -= len(*m.Config)
+		copy(dAtA[i:], *m.Config)
+		i = encodeVarintGenerated(dAtA, i, uint64(len(*m.Config)))
+		i--
+		dAtA[i] = 0x12
+	}
+	if len(m.Layers) > 0 {
+		for iNdEx := len(m.Layers) - 1; iNdEx >= 0; iNdEx-- {
+			i -= len(m.Layers[iNdEx])
+			copy(dAtA[i:], m.Layers[iNdEx])
+			i = encodeVarintGenerated(dAtA, i, uint64(len(m.Layers[iNdEx])))
+			i--
+			dAtA[i] = 0xa
+		}
+	}
+	return len(dAtA) - i, nil
+}
+
+func (m *ImageImportSpec) Marshal() (dAtA []byte, err error) {
+	size := m.Size()
+	dAtA = make([]byte, size)
+	n, err := m.MarshalToSizedBuffer(dAtA[:size])
+	if err != nil {
+		return nil, err
+	}
+	return dAtA[:n], nil
+}
+
+func (m *ImageImportSpec) MarshalTo(dAtA []byte) (int, error) {
+	size := m.Size()
+	return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *ImageImportSpec) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+	i := len(dAtA)
+	_ = i
+	var l int
+	_ = l
+	{
+		size, err := m.ReferencePolicy.MarshalToSizedBuffer(dAtA[:i])
+		if err != nil {
+			return 0, err
+		}
+		i -= size
+		i = encodeVarintGenerated(dAtA, i, uint64(size))
+	}
+	i--
+	dAtA[i] = 0x2a
+	i--
+	if m.IncludeManifest {
+		dAtA[i] = 1
+	} else {
+		dAtA[i] = 0
+	}
+	i--
+	dAtA[i] = 0x20
+	{
+		size, err := m.ImportPolicy.MarshalToSizedBuffer(dAtA[:i])
+		if err != nil {
+			return 0, err
+		}
+		i -= size
+		i = encodeVarintGenerated(dAtA, i, uint64(size))
+	}
+	i--
+	dAtA[i] = 0x1a
+	if m.To != nil {
+		{
+			size, err := m.To.MarshalToSizedBuffer(dAtA[:i])
+			if err != nil {
+				return 0, err
+			}
+			i -= size
+			i = encodeVarintGenerated(dAtA, i, uint64(size))
+		}
+		i--
+		dAtA[i] = 0x12
+	}
+	{
+		size, err := m.From.MarshalToSizedBuffer(dAtA[:i])
+		if err != nil {
+			return 0, err
+		}
+		i -= size
+		i = encodeVarintGenerated(dAtA, i, uint64(size))
+	}
+	i--
+	dAtA[i] = 0xa
+	return len(dAtA) - i, nil
+}
+
+func (m *ImageImportStatus) Marshal() (dAtA []byte, err error) {
+	size := m.Size()
+	dAtA = make([]byte, size)
+	n, err := m.MarshalToSizedBuffer(dAtA[:size])
+	if err != nil {
+		return nil, err
+	}
+	return dAtA[:n], nil
+}
+
+func (m *ImageImportStatus) MarshalTo(dAtA []byte) (int, error) {
+	size := m.Size()
+	return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *ImageImportStatus) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+	i := len(dAtA)
+	_ = i
+	var l int
+	_ = l
+	i -= len(m.Tag)
+	copy(dAtA[i:], m.Tag)
+	i = encodeVarintGenerated(dAtA, i, uint64(len(m.Tag)))
+	i--
+	dAtA[i] = 0x1a
+	if m.Image != nil {
+		{
+			size, err := m.Image.MarshalToSizedBuffer(dAtA[:i])
+			if err != nil {
+				return 0, err
+			}
+			i -= size
+			i = encodeVarintGenerated(dAtA, i, uint64(size))
+		}
+		i--
+		dAtA[i] = 0x12
+	}
+	{
+		size, err := m.Status.MarshalToSizedBuffer(dAtA[:i])
+		if err != nil {
+			return 0, err
+		}
+		i -= size
+		i = encodeVarintGenerated(dAtA, i, uint64(size))
+	}
+	i--
+	dAtA[i] = 0xa
+	return len(dAtA) - i, nil
+}
+
+func (m *ImageLayer) Marshal() (dAtA []byte, err error) {
+	size := m.Size()
+	dAtA = make([]byte, size)
+	n, err := m.MarshalToSizedBuffer(dAtA[:size])
+	if err != nil {
+		return nil, err
+	}
+	return dAtA[:n], nil
+}
+
+func (m *ImageLayer) MarshalTo(dAtA []byte) (int, error) {
+	size := m.Size()
+	return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *ImageLayer) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+	i := len(dAtA)
+	_ = i
+	var l int
+	_ = l
+	i -= len(m.MediaType)
+	copy(dAtA[i:], m.MediaType)
+	i = encodeVarintGenerated(dAtA, i, uint64(len(m.MediaType)))
+	i--
+	dAtA[i] = 0x1a
+	i = encodeVarintGenerated(dAtA, i, uint64(m.LayerSize))
+	i--
+	dAtA[i] = 0x10
+	i -= len(m.Name)
+	copy(dAtA[i:], m.Name)
+	i = encodeVarintGenerated(dAtA, i, uint64(len(m.Name)))
+	i--
+	dAtA[i] = 0xa
+	return len(dAtA) - i, nil
+}
+
+func (m *ImageLayerData) Marshal() (dAtA []byte, err error) {
+	size := m.Size()
+	dAtA = make([]byte, size)
+	n, err := m.MarshalToSizedBuffer(dAtA[:size])
+	if err != nil {
+		return nil, err
+	}
+	return dAtA[:n], nil
+}
+
+func (m *ImageLayerData) MarshalTo(dAtA []byte) (int, error) {
+	size := m.Size()
+	return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *ImageLayerData) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+	i := len(dAtA)
+	_ = i
+	var l int
+	_ = l
+	i -= len(m.MediaType)
+	copy(dAtA[i:], m.MediaType)
+	i = encodeVarintGenerated(dAtA, i, uint64(len(m.MediaType)))
+	i--
+	dAtA[i] = 0x12
+	if m.LayerSize != nil {
+		i = encodeVarintGenerated(dAtA, i, uint64(*m.LayerSize))
+		i--
+		dAtA[i] = 0x8
+	}
+	return len(dAtA) - i, nil
+}
+
+func (m *ImageList) Marshal() (dAtA []byte, err error) {
+	size := m.Size()
+	dAtA = make([]byte, size)
+	n, err := m.MarshalToSizedBuffer(dAtA[:size])
+	if err != nil {
+		return nil, err
+	}
+	return dAtA[:n], nil
+}
+
+func (m *ImageList) MarshalTo(dAtA []byte) (int, error) {
+	size := m.Size()
+	return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *ImageList) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+	i := len(dAtA)
+	_ = i
+	var l int
+	_ = l
+	if len(m.Items) > 0 {
+		for iNdEx := len(m.Items) - 1; iNdEx >= 0; iNdEx-- {
+			{
+				size, err := m.Items[iNdEx].MarshalToSizedBuffer(dAtA[:i])
+				if err != nil {
+					return 0, err
+				}
+				i -= size
+				i = encodeVarintGenerated(dAtA, i, uint64(size))
+			}
+			i--
+			dAtA[i] = 0x12
+		}
+	}
+	{
+		size, err := m.ListMeta.MarshalToSizedBuffer(dAtA[:i])
+		if err != nil {
+			return 0, err
+		}
+		i -= size
+		i = encodeVarintGenerated(dAtA, i, uint64(size))
+	}
+	i--
+	dAtA[i] = 0xa
+	return len(dAtA) - i, nil
+}
+
+func (m *ImageLookupPolicy) Marshal() (dAtA []byte, err error) {
+	size := m.Size()
+	dAtA = make([]byte, size)
+	n, err := m.MarshalToSizedBuffer(dAtA[:size])
+	if err != nil {
+		return nil, err
+	}
+	return dAtA[:n], nil
+}
+
+func (m *ImageLookupPolicy) MarshalTo(dAtA []byte) (int, error) {
+	size := m.Size()
+	return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *ImageLookupPolicy) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+	i := len(dAtA)
+	_ = i
+	var l int
+	_ = l
+	i--
+	if m.Local {
+		dAtA[i] = 1
+	} else {
+		dAtA[i] = 0
+	}
+	i--
+	dAtA[i] = 0x18
+	return len(dAtA) - i, nil
+}
+
+func (m *ImageSignature) Marshal() (dAtA []byte, err error) {
+	size := m.Size()
+	dAtA = make([]byte, size)
+	n, err := m.MarshalToSizedBuffer(dAtA[:size])
+	if err != nil {
+		return nil, err
+	}
+	return dAtA[:n], nil
+}
+
+func (m *ImageSignature) MarshalTo(dAtA []byte) (int, error) {
+	size := m.Size()
+	return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *ImageSignature) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+	i := len(dAtA)
+	_ = i
+	var l int
+	_ = l
+	if m.IssuedTo != nil {
+		{
+			size, err := m.IssuedTo.MarshalToSizedBuffer(dAtA[:i])
+			if err != nil {
+				return 0, err
+			}
+			i -= size
+			i = encodeVarintGenerated(dAtA, i, uint64(size))
+		}
+		i--
+		dAtA[i] = 0x4a
+	}
+	if m.IssuedBy != nil {
+		{
+			size, err := m.IssuedBy.MarshalToSizedBuffer(dAtA[:i])
+			if err != nil {
+				return 0, err
+			}
+			i -= size
+			i = encodeVarintGenerated(dAtA, i, uint64(size))
+		}
+		i--
+		dAtA[i] = 0x42
+	}
+	if m.Created != nil {
+		{
+			size, err := m.Created.MarshalToSizedBuffer(dAtA[:i])
+			if err != nil {
+				return 0, err
+			}
+			i -= size
+			i = encodeVarintGenerated(dAtA, i, uint64(size))
+		}
+		i--
+		dAtA[i] = 0x3a
+	}
+	if len(m.SignedClaims) > 0 {
+		keysForSignedClaims := make([]string, 0, len(m.SignedClaims))
+		for k := range m.SignedClaims {
+			keysForSignedClaims = append(keysForSignedClaims, string(k))
+		}
+		github_com_gogo_protobuf_sortkeys.Strings(keysForSignedClaims)
+		for iNdEx := len(keysForSignedClaims) - 1; iNdEx >= 0; iNdEx-- {
+			v := m.SignedClaims[string(keysForSignedClaims[iNdEx])]
+			baseI := i
+			i -= len(v)
+			copy(dAtA[i:], v)
+			i = encodeVarintGenerated(dAtA, i, uint64(len(v)))
+			i--
+			dAtA[i] = 0x12
+			i -= len(keysForSignedClaims[iNdEx])
+			copy(dAtA[i:], keysForSignedClaims[iNdEx])
+			i = encodeVarintGenerated(dAtA, i, uint64(len(keysForSignedClaims[iNdEx])))
+			i--
+			dAtA[i] = 0xa
+			i = encodeVarintGenerated(dAtA, i, uint64(baseI-i))
+			i--
+			dAtA[i] = 0x32
+		}
+	}
+	i -= len(m.ImageIdentity)
+	copy(dAtA[i:], m.ImageIdentity)
+	i = encodeVarintGenerated(dAtA, i, uint64(len(m.ImageIdentity)))
+	i--
+	dAtA[i] = 0x2a
+	if len(m.Conditions) > 0 {
+		for iNdEx := len(m.Conditions) - 1; iNdEx >= 0; iNdEx-- {
+			{
+				size, err := m.Conditions[iNdEx].MarshalToSizedBuffer(dAtA[:i])
+				if err != nil {
+					return 0, err
+				}
+				i -= size
+				i = encodeVarintGenerated(dAtA, i, uint64(size))
+			}
+			i--
+			dAtA[i] = 0x22
+		}
+	}
+	if m.Content != nil {
+		i -= len(m.Content)
+		copy(dAtA[i:], m.Content)
+		i = encodeVarintGenerated(dAtA, i, uint64(len(m.Content)))
+		i--
+		dAtA[i] = 0x1a
+	}
+	i -= len(m.Type)
+	copy(dAtA[i:], m.Type)
+	i = encodeVarintGenerated(dAtA, i, uint64(len(m.Type)))
+	i--
+	dAtA[i] = 0x12
+	{
+		size, err := m.ObjectMeta.MarshalToSizedBuffer(dAtA[:i])
+		if err != nil {
+			return 0, err
+		}
+		i -= size
+		i = encodeVarintGenerated(dAtA, i, uint64(size))
+	}
+	i--
+	dAtA[i] = 0xa
+	return len(dAtA) - i, nil
+}
+
+func (m *ImageStream) Marshal() (dAtA []byte, err error) {
+	size := m.Size()
+	dAtA = make([]byte, size)
+	n, err := m.MarshalToSizedBuffer(dAtA[:size])
+	if err != nil {
+		return nil, err
+	}
+	return dAtA[:n], nil
+}
+
+func (m *ImageStream) MarshalTo(dAtA []byte) (int, error) {
+	size := m.Size()
+	return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *ImageStream) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+	i := len(dAtA)
+	_ = i
+	var l int
+	_ = l
+	{
+		size, err := m.Status.MarshalToSizedBuffer(dAtA[:i])
+		if err != nil {
+			return 0, err
+		}
+		i -= size
+		i = encodeVarintGenerated(dAtA, i, uint64(size))
+	}
+	i--
+	dAtA[i] = 0x1a
+	{
+		size, err := m.Spec.MarshalToSizedBuffer(dAtA[:i])
+		if err != nil {
+			return 0, err
+		}
+		i -= size
+		i = encodeVarintGenerated(dAtA, i, uint64(size))
+	}
+	i--
+	dAtA[i] = 0x12
+	{
+		size, err := m.ObjectMeta.MarshalToSizedBuffer(dAtA[:i])
+		if err != nil {
+			return 0, err
+		}
+		i -= size
+		i = encodeVarintGenerated(dAtA, i, uint64(size))
+	}
+	i--
+	dAtA[i] = 0xa
+	return len(dAtA) - i, nil
+}
+
+func (m *ImageStreamImage) Marshal() (dAtA []byte, err error) {
+	size := m.Size()
+	dAtA = make([]byte, size)
+	n, err := m.MarshalToSizedBuffer(dAtA[:size])
+	if err != nil {
+		return nil, err
+	}
+	return dAtA[:n], nil
+}
+
+func (m *ImageStreamImage) MarshalTo(dAtA []byte) (int, error) {
+	size := m.Size()
+	return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *ImageStreamImage) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+	i := len(dAtA)
+	_ = i
+	var l int
+	_ = l
+	{
+		size, err := m.Image.MarshalToSizedBuffer(dAtA[:i])
+		if err != nil {
+			return 0, err
+		}
+		i -= size
+		i = encodeVarintGenerated(dAtA, i, uint64(size))
+	}
+	i--
+	dAtA[i] = 0x12
+	{
+		size, err := m.ObjectMeta.MarshalToSizedBuffer(dAtA[:i])
+		if err != nil {
+			return 0, err
+		}
+		i -= size
+		i = encodeVarintGenerated(dAtA, i, uint64(size))
+	}
+	i--
+	dAtA[i] = 0xa
+	return len(dAtA) - i, nil
+}
+
+func (m *ImageStreamImport) Marshal() (dAtA []byte, err error) {
+	size := m.Size()
+	dAtA = make([]byte, size)
+	n, err := m.MarshalToSizedBuffer(dAtA[:size])
+	if err != nil {
+		return nil, err
+	}
+	return dAtA[:n], nil
+}
+
+func (m *ImageStreamImport) MarshalTo(dAtA []byte) (int, error) {
+	size := m.Size()
+	return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *ImageStreamImport) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+	i := len(dAtA)
+	_ = i
+	var l int
+	_ = l
+	{
+		size, err := m.Status.MarshalToSizedBuffer(dAtA[:i])
+		if err != nil {
+			return 0, err
+		}
+		i -= size
+		i = encodeVarintGenerated(dAtA, i, uint64(size))
+	}
+	i--
+	dAtA[i] = 0x1a
+	{
+		size, err := m.Spec.MarshalToSizedBuffer(dAtA[:i])
+		if err != nil {
+			return 0, err
+		}
+		i -= size
+		i = encodeVarintGenerated(dAtA, i, uint64(size))
+	}
+	i--
+	dAtA[i] = 0x12
+	{
+		size, err := m.ObjectMeta.MarshalToSizedBuffer(dAtA[:i])
+		if err != nil {
+			return 0, err
+		}
+		i -= size
+		i = encodeVarintGenerated(dAtA, i, uint64(size))
+	}
+	i--
+	dAtA[i] = 0xa
+	return len(dAtA) - i, nil
+}
+
+func (m *ImageStreamImportSpec) Marshal() (dAtA []byte, err error) {
+	size := m.Size()
+	dAtA = make([]byte, size)
+	n, err := m.MarshalToSizedBuffer(dAtA[:size])
+	if err != nil {
+		return nil, err
+	}
+	return dAtA[:n], nil
+}
+
+func (m *ImageStreamImportSpec) MarshalTo(dAtA []byte) (int, error) {
+	size := m.Size()
+	return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *ImageStreamImportSpec) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+	i := len(dAtA)
+	_ = i
+	var l int
+	_ = l
+	if len(m.Images) > 0 {
+		for iNdEx := len(m.Images) - 1; iNdEx >= 0; iNdEx-- {
+			{
+				size, err := m.Images[iNdEx].MarshalToSizedBuffer(dAtA[:i])
+				if err != nil {
+					return 0, err
+				}
+				i -= size
+				i = encodeVarintGenerated(dAtA, i, uint64(size))
+			}
+			i--
+			dAtA[i] = 0x1a
+		}
+	}
+	if m.Repository != nil {
+		{
+			size, err := m.Repository.MarshalToSizedBuffer(dAtA[:i])
+			if err != nil {
+				return 0, err
+			}
+			i -= size
+			i = encodeVarintGenerated(dAtA, i, uint64(size))
+		}
+		i--
+		dAtA[i] = 0x12
+	}
+	i--
+	if m.Import {
+		dAtA[i] = 1
+	} else {
+		dAtA[i] = 0
+	}
+	i--
+	dAtA[i] = 0x8
+	return len(dAtA) - i, nil
+}
+
+func (m *ImageStreamImportStatus) Marshal() (dAtA []byte, err error) {
+	size := m.Size()
+	dAtA = make([]byte, size)
+	n, err := m.MarshalToSizedBuffer(dAtA[:size])
+	if err != nil {
+		return nil, err
+	}
+	return dAtA[:n], nil
+}
+
+func (m *ImageStreamImportStatus) MarshalTo(dAtA []byte) (int, error) {
+	size := m.Size()
+	return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *ImageStreamImportStatus) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+	i := len(dAtA)
+	_ = i
+	var l int
+	_ = l
+	if len(m.Images) > 0 {
+		for iNdEx := len(m.Images) - 1; iNdEx >= 0; iNdEx-- {
+			{
+				size, err := m.Images[iNdEx].MarshalToSizedBuffer(dAtA[:i])
+				if err != nil {
+					return 0, err
+				}
+				i -= size
+				i = encodeVarintGenerated(dAtA, i, uint64(size))
+			}
+			i--
+			dAtA[i] = 0x1a
+		}
+	}
+	if m.Repository != nil {
+		{
+			size, err := m.Repository.MarshalToSizedBuffer(dAtA[:i])
+			if err != nil {
+				return 0, err
+			}
+			i -= size
+			i = encodeVarintGenerated(dAtA, i, uint64(size))
+		}
+		i--
+		dAtA[i] = 0x12
+	}
+	if m.Import != nil {
+		{
+			size, err := m.Import.MarshalToSizedBuffer(dAtA[:i])
+			if err != nil {
+				return 0, err
+			}
+			i -= size
+			i = encodeVarintGenerated(dAtA, i, uint64(size))
+		}
+		i--
+		dAtA[i] = 0xa
+	}
+	return len(dAtA) - i, nil
+}
+
+func (m *ImageStreamLayers) Marshal() (dAtA []byte, err error) {
+	size := m.Size()
+	dAtA = make([]byte, size)
+	n, err := m.MarshalToSizedBuffer(dAtA[:size])
+	if err != nil {
+		return nil, err
+	}
+	return dAtA[:n], nil
+}
+
+func (m *ImageStreamLayers) MarshalTo(dAtA []byte) (int, error) {
+	size := m.Size()
+	return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *ImageStreamLayers) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+	i := len(dAtA)
+	_ = i
+	var l int
+	_ = l
+	if len(m.Images) > 0 {
+		keysForImages := make([]string, 0, len(m.Images))
+		for k := range m.Images {
+			keysForImages = append(keysForImages, string(k))
+		}
+		github_com_gogo_protobuf_sortkeys.Strings(keysForImages)
+		for iNdEx := len(keysForImages) - 1; iNdEx >= 0; iNdEx-- {
+			v := m.Images[string(keysForImages[iNdEx])]
+			baseI := i
+			{
+				size, err := (&v).MarshalToSizedBuffer(dAtA[:i])
+				if err != nil {
+					return 0, err
+				}
+				i -= size
+				i = encodeVarintGenerated(dAtA, i, uint64(size))
+			}
+			i--
+			dAtA[i] = 0x12
+			i -= len(keysForImages[iNdEx])
+			copy(dAtA[i:], keysForImages[iNdEx])
+			i = encodeVarintGenerated(dAtA, i, uint64(len(keysForImages[iNdEx])))
+			i--
+			dAtA[i] = 0xa
+			i = encodeVarintGenerated(dAtA, i, uint64(baseI-i))
+			i--
+			dAtA[i] = 0x1a
+		}
+	}
+	if len(m.Blobs) > 0 {
+		keysForBlobs := make([]string, 0, len(m.Blobs))
+		for k := range m.Blobs {
+			keysForBlobs = append(keysForBlobs, string(k))
+		}
+		github_com_gogo_protobuf_sortkeys.Strings(keysForBlobs)
+		for iNdEx := len(keysForBlobs) - 1; iNdEx >= 0; iNdEx-- {
+			v := m.Blobs[string(keysForBlobs[iNdEx])]
+			baseI := i
+			{
+				size, err := (&v).MarshalToSizedBuffer(dAtA[:i])
+				if err != nil {
+					return 0, err
+				}
+				i -= size
+				i = encodeVarintGenerated(dAtA, i, uint64(size))
+			}
+			i--
+			dAtA[i] = 0x12
+			i -= len(keysForBlobs[iNdEx])
+			copy(dAtA[i:], keysForBlobs[iNdEx])
+			i = encodeVarintGenerated(dAtA, i, uint64(len(keysForBlobs[iNdEx])))
+			i--
+			dAtA[i] = 0xa
+			i = encodeVarintGenerated(dAtA, i, uint64(baseI-i))
+			i--
+			dAtA[i] = 0x12
+		}
+	}
+	{
+		size, err := m.ObjectMeta.MarshalToSizedBuffer(dAtA[:i])
+		if err != nil {
+			return 0, err
+		}
+		i -= size
+		i = encodeVarintGenerated(dAtA, i, uint64(size))
+	}
+	i--
+	dAtA[i] = 0xa
+	return len(dAtA) - i, nil
+}
+
+func (m *ImageStreamList) Marshal() (dAtA []byte, err error) {
+	size := m.Size()
+	dAtA = make([]byte, size)
+	n, err := m.MarshalToSizedBuffer(dAtA[:size])
+	if err != nil {
+		return nil, err
+	}
+	return dAtA[:n], nil
+}
+
+func (m *ImageStreamList) MarshalTo(dAtA []byte) (int, error) {
+	size := m.Size()
+	return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *ImageStreamList) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+	i := len(dAtA)
+	_ = i
+	var l int
+	_ = l
+	if len(m.Items) > 0 {
+		for iNdEx := len(m.Items) - 1; iNdEx >= 0; iNdEx-- {
+			{
+				size, err := m.Items[iNdEx].MarshalToSizedBuffer(dAtA[:i])
+				if err != nil {
+					return 0, err
+				}
+				i -= size
+				i = encodeVarintGenerated(dAtA, i, uint64(size))
+			}
+			i--
+			dAtA[i] = 0x12
+		}
+	}
+	{
+		size, err := m.ListMeta.MarshalToSizedBuffer(dAtA[:i])
+		if err != nil {
+			return 0, err
+		}
+		i -= size
+		i = encodeVarintGenerated(dAtA, i, uint64(size))
+	}
+	i--
+	dAtA[i] = 0xa
+	return len(dAtA) - i, nil
+}
+
+func (m *ImageStreamMapping) Marshal() (dAtA []byte, err error) {
+	size := m.Size()
+	dAtA = make([]byte, size)
+	n, err := m.MarshalToSizedBuffer(dAtA[:size])
+	if err != nil {
+		return nil, err
+	}
+	return dAtA[:n], nil
+}
+
+func (m *ImageStreamMapping) MarshalTo(dAtA []byte) (int, error) {
+	size := m.Size()
+	return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *ImageStreamMapping) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+	i := len(dAtA)
+	_ = i
+	var l int
+	_ = l
+	i -= len(m.Tag)
+	copy(dAtA[i:], m.Tag)
+	i = encodeVarintGenerated(dAtA, i, uint64(len(m.Tag)))
+	i--
+	dAtA[i] = 0x1a
+	{
+		size, err := m.Image.MarshalToSizedBuffer(dAtA[:i])
+		if err != nil {
+			return 0, err
+		}
+		i -= size
+		i = encodeVarintGenerated(dAtA, i, uint64(size))
+	}
+	i--
+	dAtA[i] = 0x12
+	{
+		size, err := m.ObjectMeta.MarshalToSizedBuffer(dAtA[:i])
+		if err != nil {
+			return 0, err
+		}
+		i -= size
+		i = encodeVarintGenerated(dAtA, i, uint64(size))
+	}
+	i--
+	dAtA[i] = 0xa
+	return len(dAtA) - i, nil
+}
+
+func (m *ImageStreamSpec) Marshal() (dAtA []byte, err error) {
+	size := m.Size()
+	dAtA = make([]byte, size)
+	n, err := m.MarshalToSizedBuffer(dAtA[:size])
+	if err != nil {
+		return nil, err
+	}
+	return dAtA[:n], nil
+}
+
+func (m *ImageStreamSpec) MarshalTo(dAtA []byte) (int, error) {
+	size := m.Size()
+	return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *ImageStreamSpec) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+	i := len(dAtA)
+	_ = i
+	var l int
+	_ = l
+	{
+		size, err := m.LookupPolicy.MarshalToSizedBuffer(dAtA[:i])
+		if err != nil {
+			return 0, err
+		}
+		i -= size
+		i = encodeVarintGenerated(dAtA, i, uint64(size))
+	}
+	i--
+	dAtA[i] = 0x1a
+	if len(m.Tags) > 0 {
+		for iNdEx := len(m.Tags) - 1; iNdEx >= 0; iNdEx-- {
+			{
+				size, err := m.Tags[iNdEx].MarshalToSizedBuffer(dAtA[:i])
+				if err != nil {
+					return 0, err
+				}
+				i -= size
+				i = encodeVarintGenerated(dAtA, i, uint64(size))
+			}
+			i--
+			dAtA[i] = 0x12
+		}
+	}
+	i -= len(m.DockerImageRepository)
+	copy(dAtA[i:], m.DockerImageRepository)
+	i = encodeVarintGenerated(dAtA, i, uint64(len(m.DockerImageRepository)))
+	i--
+	dAtA[i] = 0xa
+	return len(dAtA) - i, nil
+}
+
+func (m *ImageStreamStatus) Marshal() (dAtA []byte, err error) {
+	size := m.Size()
+	dAtA = make([]byte, size)
+	n, err := m.MarshalToSizedBuffer(dAtA[:size])
+	if err != nil {
+		return nil, err
+	}
+	return dAtA[:n], nil
+}
+
+func (m *ImageStreamStatus) MarshalTo(dAtA []byte) (int, error) {
+	size := m.Size()
+	return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *ImageStreamStatus) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+	i := len(dAtA)
+	_ = i
+	var l int
+	_ = l
+	i -= len(m.PublicDockerImageRepository)
+	copy(dAtA[i:], m.PublicDockerImageRepository)
+	i = encodeVarintGenerated(dAtA, i, uint64(len(m.PublicDockerImageRepository)))
+	i--
+	dAtA[i] = 0x1a
+	if len(m.Tags) > 0 {
+		for iNdEx := len(m.Tags) - 1; iNdEx >= 0; iNdEx-- {
+			{
+				size, err := m.Tags[iNdEx].MarshalToSizedBuffer(dAtA[:i])
+				if err != nil {
+					return 0, err
+				}
+				i -= size
+				i = encodeVarintGenerated(dAtA, i, uint64(size))
+			}
+			i--
+			dAtA[i] = 0x12
+		}
+	}
+	i -= len(m.DockerImageRepository)
+	copy(dAtA[i:], m.DockerImageRepository)
+	i = encodeVarintGenerated(dAtA, i, uint64(len(m.DockerImageRepository)))
+	i--
+	dAtA[i] = 0xa
+	return len(dAtA) - i, nil
+}
+
+func (m *ImageStreamTag) Marshal() (dAtA []byte, err error) {
+	size := m.Size()
+	dAtA = make([]byte, size)
+	n, err := m.MarshalToSizedBuffer(dAtA[:size])
+	if err != nil {
+		return nil, err
+	}
+	return dAtA[:n], nil
+}
+
+func (m *ImageStreamTag) MarshalTo(dAtA []byte) (int, error) {
+	size := m.Size()
+	return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *ImageStreamTag) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+	i := len(dAtA)
+	_ = i
+	var l int
+	_ = l
+	{
+		size, err := m.LookupPolicy.MarshalToSizedBuffer(dAtA[:i])
+		if err != nil {
+			return 0, err
+		}
+		i -= size
+		i = encodeVarintGenerated(dAtA, i, uint64(size))
+	}
+	i--
+	dAtA[i] = 0x32
+	{
+		size, err := m.Image.MarshalToSizedBuffer(dAtA[:i])
+		if err != nil {
+			return 0, err
+		}
+		i -= size
+		i = encodeVarintGenerated(dAtA, i, uint64(size))
+	}
+	i--
+	dAtA[i] = 0x2a
+	if len(m.Conditions) > 0 {
+		for iNdEx := len(m.Conditions) - 1; iNdEx >= 0; iNdEx-- {
+			{
+				size, err := m.Conditions[iNdEx].MarshalToSizedBuffer(dAtA[:i])
+				if err != nil {
+					return 0, err
+				}
+				i -= size
+				i = encodeVarintGenerated(dAtA, i, uint64(size))
+			}
+			i--
+			dAtA[i] = 0x22
+		}
+	}
+	i = encodeVarintGenerated(dAtA, i, uint64(m.Generation))
+	i--
+	dAtA[i] = 0x18
+	if m.Tag != nil {
+		{
+			size, err := m.Tag.MarshalToSizedBuffer(dAtA[:i])
+			if err != nil {
+				return 0, err
+			}
+			i -= size
+			i = encodeVarintGenerated(dAtA, i, uint64(size))
+		}
+		i--
+		dAtA[i] = 0x12
+	}
+	{
+		size, err := m.ObjectMeta.MarshalToSizedBuffer(dAtA[:i])
+		if err != nil {
+			return 0, err
+		}
+		i -= size
+		i = encodeVarintGenerated(dAtA, i, uint64(size))
+	}
+	i--
+	dAtA[i] = 0xa
+	return len(dAtA) - i, nil
+}
+
+func (m *ImageStreamTagList) Marshal() (dAtA []byte, err error) {
+	size := m.Size()
+	dAtA = make([]byte, size)
+	n, err := m.MarshalToSizedBuffer(dAtA[:size])
+	if err != nil {
+		return nil, err
+	}
+	return dAtA[:n], nil
+}
+
+func (m *ImageStreamTagList) MarshalTo(dAtA []byte) (int, error) {
+	size := m.Size()
+	return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *ImageStreamTagList) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+	i := len(dAtA)
+	_ = i
+	var l int
+	_ = l
+	if len(m.Items) > 0 {
+		for iNdEx := len(m.Items) - 1; iNdEx >= 0; iNdEx-- {
+			{
+				size, err := m.Items[iNdEx].MarshalToSizedBuffer(dAtA[:i])
+				if err != nil {
+					return 0, err
+				}
+				i -= size
+				i = encodeVarintGenerated(dAtA, i, uint64(size))
+			}
+			i--
+			dAtA[i] = 0x12
+		}
+	}
+	{
+		size, err := m.ListMeta.MarshalToSizedBuffer(dAtA[:i])
+		if err != nil {
+			return 0, err
+		}
+		i -= size
+		i = encodeVarintGenerated(dAtA, i, uint64(size))
+	}
+	i--
+	dAtA[i] = 0xa
+	return len(dAtA) - i, nil
+}
+
+func (m *ImageTag) Marshal() (dAtA []byte, err error) {
+	size := m.Size()
+	dAtA = make([]byte, size)
+	n, err := m.MarshalToSizedBuffer(dAtA[:size])
+	if err != nil {
+		return nil, err
+	}
+	return dAtA[:n], nil
+}
+
+func (m *ImageTag) MarshalTo(dAtA []byte) (int, error) {
+	size := m.Size()
+	return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *ImageTag) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+	i := len(dAtA)
+	_ = i
+	var l int
+	_ = l
+	if m.Image != nil {
+		{
+			size, err := m.Image.MarshalToSizedBuffer(dAtA[:i])
+			if err != nil {
+				return 0, err
+			}
+			i -= size
+			i = encodeVarintGenerated(dAtA, i, uint64(size))
+		}
+		i--
+		dAtA[i] = 0x22
+	}
+	if m.Status != nil {
+		{
+			size, err := m.Status.MarshalToSizedBuffer(dAtA[:i])
+			if err != nil {
+				return 0, err
+			}
+			i -= size
+			i = encodeVarintGenerated(dAtA, i, uint64(size))
+		}
+		i--
+		dAtA[i] = 0x1a
+	}
+	if m.Spec != nil {
+		{
+			size, err := m.Spec.MarshalToSizedBuffer(dAtA[:i])
+			if err != nil {
+				return 0, err
+			}
+			i -= size
+			i = encodeVarintGenerated(dAtA, i, uint64(size))
+		}
+		i--
+		dAtA[i] = 0x12
+	}
+	{
+		size, err := m.ObjectMeta.MarshalToSizedBuffer(dAtA[:i])
+		if err != nil {
+			return 0, err
+		}
+		i -= size
+		i = encodeVarintGenerated(dAtA, i, uint64(size))
+	}
+	i--
+	dAtA[i] = 0xa
+	return len(dAtA) - i, nil
+}
+
+func (m *ImageTagList) Marshal() (dAtA []byte, err error) {
+	size := m.Size()
+	dAtA = make([]byte, size)
+	n, err := m.MarshalToSizedBuffer(dAtA[:size])
+	if err != nil {
+		return nil, err
+	}
+	return dAtA[:n], nil
+}
+
+func (m *ImageTagList) MarshalTo(dAtA []byte) (int, error) {
+	size := m.Size()
+	return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *ImageTagList) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+	i := len(dAtA)
+	_ = i
+	var l int
+	_ = l
+	if len(m.Items) > 0 {
+		for iNdEx := len(m.Items) - 1; iNdEx >= 0; iNdEx-- {
+			{
+				size, err := m.Items[iNdEx].MarshalToSizedBuffer(dAtA[:i])
+				if err != nil {
+					return 0, err
+				}
+				i -= size
+				i = encodeVarintGenerated(dAtA, i, uint64(size))
+			}
+			i--
+			dAtA[i] = 0x12
+		}
+	}
+	{
+		size, err := m.ListMeta.MarshalToSizedBuffer(dAtA[:i])
+		if err != nil {
+			return 0, err
+		}
+		i -= size
+		i = encodeVarintGenerated(dAtA, i, uint64(size))
+	}
+	i--
+	dAtA[i] = 0xa
+	return len(dAtA) - i, nil
+}
+
+func (m *NamedTagEventList) Marshal() (dAtA []byte, err error) {
+	size := m.Size()
+	dAtA = make([]byte, size)
+	n, err := m.MarshalToSizedBuffer(dAtA[:size])
+	if err != nil {
+		return nil, err
+	}
+	return dAtA[:n], nil
+}
+
+func (m *NamedTagEventList) MarshalTo(dAtA []byte) (int, error) {
+	size := m.Size()
+	return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *NamedTagEventList) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+	i := len(dAtA)
+	_ = i
+	var l int
+	_ = l
+	if len(m.Conditions) > 0 {
+		for iNdEx := len(m.Conditions) - 1; iNdEx >= 0; iNdEx-- {
+			{
+				size, err := m.Conditions[iNdEx].MarshalToSizedBuffer(dAtA[:i])
+				if err != nil {
+					return 0, err
+				}
+				i -= size
+				i = encodeVarintGenerated(dAtA, i, uint64(size))
+			}
+			i--
+			dAtA[i] = 0x1a
+		}
+	}
+	if len(m.Items) > 0 {
+		for iNdEx := len(m.Items) - 1; iNdEx >= 0; iNdEx-- {
+			{
+				size, err := m.Items[iNdEx].MarshalToSizedBuffer(dAtA[:i])
+				if err != nil {
+					return 0, err
+				}
+				i -= size
+				i = encodeVarintGenerated(dAtA, i, uint64(size))
+			}
+			i--
+			dAtA[i] = 0x12
+		}
+	}
+	i -= len(m.Tag)
+	copy(dAtA[i:], m.Tag)
+	i = encodeVarintGenerated(dAtA, i, uint64(len(m.Tag)))
+	i--
+	dAtA[i] = 0xa
+	return len(dAtA) - i, nil
+}
+
+func (m *RepositoryImportSpec) Marshal() (dAtA []byte, err error) {
+	size := m.Size()
+	dAtA = make([]byte, size)
+	n, err := m.MarshalToSizedBuffer(dAtA[:size])
+	if err != nil {
+		return nil, err
+	}
+	return dAtA[:n], nil
+}
+
+func (m *RepositoryImportSpec) MarshalTo(dAtA []byte) (int, error) {
+	size := m.Size()
+	return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *RepositoryImportSpec) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+	i := len(dAtA)
+	_ = i
+	var l int
+	_ = l
+	{
+		size, err := m.ReferencePolicy.MarshalToSizedBuffer(dAtA[:i])
+		if err != nil {
+			return 0, err
+		}
+		i -= size
+		i = encodeVarintGenerated(dAtA, i, uint64(size))
+	}
+	i--
+	dAtA[i] = 0x22
+	i--
+	if m.IncludeManifest {
+		dAtA[i] = 1
+	} else {
+		dAtA[i] = 0
+	}
+	i--
+	dAtA[i] = 0x18
+	{
+		size, err := m.ImportPolicy.MarshalToSizedBuffer(dAtA[:i])
+		if err != nil {
+			return 0, err
+		}
+		i -= size
+		i = encodeVarintGenerated(dAtA, i, uint64(size))
+	}
+	i--
+	dAtA[i] = 0x12
+	{
+		size, err := m.From.MarshalToSizedBuffer(dAtA[:i])
+		if err != nil {
+			return 0, err
+		}
+		i -= size
+		i = encodeVarintGenerated(dAtA, i, uint64(size))
+	}
+	i--
+	dAtA[i] = 0xa
+	return len(dAtA) - i, nil
+}
+
+func (m *RepositoryImportStatus) Marshal() (dAtA []byte, err error) {
+	size := m.Size()
+	dAtA = make([]byte, size)
+	n, err := m.MarshalToSizedBuffer(dAtA[:size])
+	if err != nil {
+		return nil, err
+	}
+	return dAtA[:n], nil
+}
+
+func (m *RepositoryImportStatus) MarshalTo(dAtA []byte) (int, error) {
+	size := m.Size()
+	return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *RepositoryImportStatus) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+	i := len(dAtA)
+	_ = i
+	var l int
+	_ = l
+	if len(m.AdditionalTags) > 0 {
+		for iNdEx := len(m.AdditionalTags) - 1; iNdEx >= 0; iNdEx-- {
+			i -= len(m.AdditionalTags[iNdEx])
+			copy(dAtA[i:], m.AdditionalTags[iNdEx])
+			i = encodeVarintGenerated(dAtA, i, uint64(len(m.AdditionalTags[iNdEx])))
+			i--
+			dAtA[i] = 0x1a
+		}
+	}
+	if len(m.Images) > 0 {
+		for iNdEx := len(m.Images) - 1; iNdEx >= 0; iNdEx-- {
+			{
+				size, err := m.Images[iNdEx].MarshalToSizedBuffer(dAtA[:i])
+				if err != nil {
+					return 0, err
+				}
+				i -= size
+				i = encodeVarintGenerated(dAtA, i, uint64(size))
+			}
+			i--
+			dAtA[i] = 0x12
+		}
+	}
+	{
+		size, err := m.Status.MarshalToSizedBuffer(dAtA[:i])
+		if err != nil {
+			return 0, err
+		}
+		i -= size
+		i = encodeVarintGenerated(dAtA, i, uint64(size))
+	}
+	i--
+	dAtA[i] = 0xa
+	return len(dAtA) - i, nil
+}
+
+func (m *SignatureCondition) Marshal() (dAtA []byte, err error) {
+	size := m.Size()
+	dAtA = make([]byte, size)
+	n, err := m.MarshalToSizedBuffer(dAtA[:size])
+	if err != nil {
+		return nil, err
+	}
+	return dAtA[:n], nil
+}
+
+func (m *SignatureCondition) MarshalTo(dAtA []byte) (int, error) {
+	size := m.Size()
+	return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *SignatureCondition) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+	i := len(dAtA)
+	_ = i
+	var l int
+	_ = l
+	i -= len(m.Message)
+	copy(dAtA[i:], m.Message)
+	i = encodeVarintGenerated(dAtA, i, uint64(len(m.Message)))
+	i--
+	dAtA[i] = 0x32
+	i -= len(m.Reason)
+	copy(dAtA[i:], m.Reason)
+	i = encodeVarintGenerated(dAtA, i, uint64(len(m.Reason)))
+	i--
+	dAtA[i] = 0x2a
+	{
+		size, err := m.LastTransitionTime.MarshalToSizedBuffer(dAtA[:i])
+		if err != nil {
+			return 0, err
+		}
+		i -= size
+		i = encodeVarintGenerated(dAtA, i, uint64(size))
+	}
+	i--
+	dAtA[i] = 0x22
+	{
+		size, err := m.LastProbeTime.MarshalToSizedBuffer(dAtA[:i])
+		if err != nil {
+			return 0, err
+		}
+		i -= size
+		i = encodeVarintGenerated(dAtA, i, uint64(size))
+	}
+	i--
+	dAtA[i] = 0x1a
+	i -= len(m.Status)
+	copy(dAtA[i:], m.Status)
+	i = encodeVarintGenerated(dAtA, i, uint64(len(m.Status)))
+	i--
+	dAtA[i] = 0x12
+	i -= len(m.Type)
+	copy(dAtA[i:], m.Type)
+	i = encodeVarintGenerated(dAtA, i, uint64(len(m.Type)))
+	i--
+	dAtA[i] = 0xa
+	return len(dAtA) - i, nil
+}
+
+func (m *SignatureGenericEntity) Marshal() (dAtA []byte, err error) {
+	size := m.Size()
+	dAtA = make([]byte, size)
+	n, err := m.MarshalToSizedBuffer(dAtA[:size])
+	if err != nil {
+		return nil, err
+	}
+	return dAtA[:n], nil
+}
+
+func (m *SignatureGenericEntity) MarshalTo(dAtA []byte) (int, error) {
+	size := m.Size()
+	return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *SignatureGenericEntity) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+	i := len(dAtA)
+	_ = i
+	var l int
+	_ = l
+	i -= len(m.CommonName)
+	copy(dAtA[i:], m.CommonName)
+	i = encodeVarintGenerated(dAtA, i, uint64(len(m.CommonName)))
+	i--
+	dAtA[i] = 0x12
+	i -= len(m.Organization)
+	copy(dAtA[i:], m.Organization)
+	i = encodeVarintGenerated(dAtA, i, uint64(len(m.Organization)))
+	i--
+	dAtA[i] = 0xa
+	return len(dAtA) - i, nil
+}
+
+func (m *SignatureIssuer) Marshal() (dAtA []byte, err error) {
+	size := m.Size()
+	dAtA = make([]byte, size)
+	n, err := m.MarshalToSizedBuffer(dAtA[:size])
+	if err != nil {
+		return nil, err
+	}
+	return dAtA[:n], nil
+}
+
+func (m *SignatureIssuer) MarshalTo(dAtA []byte) (int, error) {
+	size := m.Size()
+	return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *SignatureIssuer) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+	i := len(dAtA)
+	_ = i
+	var l int
+	_ = l
+ { + size, err := m.SignatureGenericEntity.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *SignatureSubject) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *SignatureSubject) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *SignatureSubject) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + i -= len(m.PublicKeyID) + copy(dAtA[i:], m.PublicKeyID) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.PublicKeyID))) + i-- + dAtA[i] = 0x12 + { + size, err := m.SignatureGenericEntity.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *TagEvent) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *TagEvent) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *TagEvent) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + i = encodeVarintGenerated(dAtA, i, uint64(m.Generation)) + i-- + dAtA[i] = 0x20 + i -= len(m.Image) + copy(dAtA[i:], m.Image) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Image))) + i-- + dAtA[i] = 0x1a + i -= len(m.DockerImageReference) + copy(dAtA[i:], m.DockerImageReference) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.DockerImageReference))) + i-- + dAtA[i] = 0x12 + { + size, err := m.Created.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *TagEventCondition) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *TagEventCondition) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *TagEventCondition) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + i = encodeVarintGenerated(dAtA, i, uint64(m.Generation)) + i-- + dAtA[i] = 0x30 + i -= len(m.Message) + copy(dAtA[i:], m.Message) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Message))) + i-- + dAtA[i] = 0x2a + i -= len(m.Reason) + copy(dAtA[i:], m.Reason) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Reason))) + i-- + dAtA[i] = 0x22 + { + size, err := m.LastTransitionTime.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a + i -= len(m.Status) + copy(dAtA[i:], m.Status) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Status))) + i-- + dAtA[i] = 0x12 + i -= len(m.Type) + copy(dAtA[i:], m.Type) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Type))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *TagImportPolicy) Marshal() (dAtA []byte, err error) { + 
size := m.Size()
+	dAtA = make([]byte, size)
+	n, err := m.MarshalToSizedBuffer(dAtA[:size])
+	if err != nil {
+		return nil, err
+	}
+	return dAtA[:n], nil
+}
+
+func (m *TagImportPolicy) MarshalTo(dAtA []byte) (int, error) {
+	size := m.Size()
+	return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *TagImportPolicy) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+	i := len(dAtA)
+	_ = i
+	var l int
+	_ = l
+	i--
+	if m.Scheduled {
+		dAtA[i] = 1
+	} else {
+		dAtA[i] = 0
+	}
+	i--
+	dAtA[i] = 0x10
+	i--
+	if m.Insecure {
+		dAtA[i] = 1
+	} else {
+		dAtA[i] = 0
+	}
+	i--
+	dAtA[i] = 0x8
+	return len(dAtA) - i, nil
+}
+
+func (m *TagReference) Marshal() (dAtA []byte, err error) {
+	size := m.Size()
+	dAtA = make([]byte, size)
+	n, err := m.MarshalToSizedBuffer(dAtA[:size])
+	if err != nil {
+		return nil, err
+	}
+	return dAtA[:n], nil
+}
+
+func (m *TagReference) MarshalTo(dAtA []byte) (int, error) {
+	size := m.Size()
+	return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *TagReference) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+	i := len(dAtA)
+	_ = i
+	var l int
+	_ = l
+	{
+		size, err := m.ReferencePolicy.MarshalToSizedBuffer(dAtA[:i])
+		if err != nil {
+			return 0, err
+		}
+		i -= size
+		i = encodeVarintGenerated(dAtA, i, uint64(size))
+	}
+	i--
+	dAtA[i] = 0x3a
+	{
+		size, err := m.ImportPolicy.MarshalToSizedBuffer(dAtA[:i])
+		if err != nil {
+			return 0, err
+		}
+		i -= size
+		i = encodeVarintGenerated(dAtA, i, uint64(size))
+	}
+	i--
+	dAtA[i] = 0x32
+	if m.Generation != nil {
+		i = encodeVarintGenerated(dAtA, i, uint64(*m.Generation))
+		i--
+		dAtA[i] = 0x28
+	}
+	i--
+	if m.Reference {
+		dAtA[i] = 1
+	} else {
+		dAtA[i] = 0
+	}
+	i--
+	dAtA[i] = 0x20
+	if m.From != nil {
+		{
+			size, err := m.From.MarshalToSizedBuffer(dAtA[:i])
+			if err != nil {
+				return 0, err
+			}
+			i -= size
+			i = encodeVarintGenerated(dAtA, i, uint64(size))
+		}
+		i--
+		dAtA[i] = 0x1a
+	}
+	if len(m.Annotations) > 0 {
+		keysForAnnotations := make([]string, 0, len(m.Annotations))
+		for k := range m.Annotations {
+			keysForAnnotations = append(keysForAnnotations, string(k))
+		}
+		github_com_gogo_protobuf_sortkeys.Strings(keysForAnnotations)
+		for iNdEx := len(keysForAnnotations) - 1; iNdEx >= 0; iNdEx-- {
+			v := m.Annotations[string(keysForAnnotations[iNdEx])]
+			baseI := i
+			i -= len(v)
+			copy(dAtA[i:], v)
+			i = encodeVarintGenerated(dAtA, i, uint64(len(v)))
+			i--
+			dAtA[i] = 0x12
+			i -= len(keysForAnnotations[iNdEx])
+			copy(dAtA[i:], keysForAnnotations[iNdEx])
+			i = encodeVarintGenerated(dAtA, i, uint64(len(keysForAnnotations[iNdEx])))
+			i--
+			dAtA[i] = 0xa
+			i = encodeVarintGenerated(dAtA, i, uint64(baseI-i))
+			i--
+			dAtA[i] = 0x12
+		}
+	}
+	i -= len(m.Name)
+	copy(dAtA[i:], m.Name)
+	i = encodeVarintGenerated(dAtA, i, uint64(len(m.Name)))
+	i--
+	dAtA[i] = 0xa
+	return len(dAtA) - i, nil
+}
+
+func (m *TagReferencePolicy) Marshal() (dAtA []byte, err error) {
+	size := m.Size()
+	dAtA = make([]byte, size)
+	n, err := m.MarshalToSizedBuffer(dAtA[:size])
+	if err != nil {
+		return nil, err
+	}
+	return dAtA[:n], nil
+}
+
+func (m *TagReferencePolicy) MarshalTo(dAtA []byte) (int, error) {
+	size := m.Size()
+	return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *TagReferencePolicy) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+	i := len(dAtA)
+	_ = i
+	var l int
+	_ = l
+	i -= len(m.Type)
+	copy(dAtA[i:], m.Type)
+	i = encodeVarintGenerated(dAtA, i, uint64(len(m.Type)))
+	i--
+	dAtA[i] = 0xa
+	return len(dAtA) - i, nil
+}
+
+func encodeVarintGenerated(dAtA []byte, offset int, v uint64) int {
+	offset -= sovGenerated(v)
+	base := offset
+	for v >= 1<<7 {
+		dAtA[offset] = uint8(v&0x7f | 0x80)
+		v >>= 7
+		offset++
+	}
+	dAtA[offset] = uint8(v)
+	return base
+}
+func (m *DockerImageReference) Size() (n int) {
+	if m == nil {
+		return 0
+	}
+	var l int
+	_ = l
+	l = len(m.Registry)
+	n += 1 + l + sovGenerated(uint64(l))
+	l = len(m.Namespace)
+	n += 1 + l + sovGenerated(uint64(l))
+	l = len(m.Name)
+	n += 1 + l + sovGenerated(uint64(l))
+	l = len(m.Tag)
+	n += 1 + l + sovGenerated(uint64(l))
+	l = len(m.ID)
+	n += 1 + l + sovGenerated(uint64(l))
+	return n
+}
+
+func (m *Image) Size() (n int) {
+	if m == nil {
+		return 0
+	}
+	var l int
+	_ = l
+	l = m.ObjectMeta.Size()
+	n += 1 + l + sovGenerated(uint64(l))
+	l = len(m.DockerImageReference)
+	n += 1 + l + sovGenerated(uint64(l))
+	l = m.DockerImageMetadata.Size()
+	n += 1 + l + sovGenerated(uint64(l))
+	l = len(m.DockerImageMetadataVersion)
+	n += 1 + l + sovGenerated(uint64(l))
+	l = len(m.DockerImageManifest)
+	n += 1 + l + sovGenerated(uint64(l))
+	if len(m.DockerImageLayers) > 0 {
+		for _, e := range m.DockerImageLayers {
+			l = e.Size()
+			n += 1 + l + sovGenerated(uint64(l))
+		}
+	}
+	if len(m.Signatures) > 0 {
+		for _, e := range m.Signatures {
+			l = e.Size()
+			n += 1 + l + sovGenerated(uint64(l))
+		}
+	}
+	if len(m.DockerImageSignatures) > 0 {
+		for _, b := range m.DockerImageSignatures {
+			l = len(b)
+			n += 1 + l + sovGenerated(uint64(l))
+		}
+	}
+	l = len(m.DockerImageManifestMediaType)
+	n += 1 + l + sovGenerated(uint64(l))
+	l = len(m.DockerImageConfig)
+	n += 1 + l + sovGenerated(uint64(l))
+	return n
+}
+
+func (m *ImageBlobReferences) Size() (n int) {
+	if m == nil {
+		return 0
+	}
+	var l int
+	_ = l
+	if len(m.Layers) > 0 {
+		for _, s := range m.Layers {
+			l = len(s)
+			n += 1 + l + sovGenerated(uint64(l))
+		}
+	}
+	if m.Config != nil {
+		l = len(*m.Config)
+		n += 1 + l + sovGenerated(uint64(l))
+	}
+	n += 2
+	return n
+}
+
+func (m *ImageImportSpec) Size() (n int) {
+	if m == nil {
+		return 0
+	}
+	var l int
+	_ = l
+	l = m.From.Size()
+	n += 1 + l + sovGenerated(uint64(l))
+	if m.To != nil {
+		l = m.To.Size()
+		n += 1 + l + sovGenerated(uint64(l))
+	}
+	l = m.ImportPolicy.Size()
+	n += 1 + l + sovGenerated(uint64(l))
+	n += 2
+	l = m.ReferencePolicy.Size()
+	n += 1 + l + sovGenerated(uint64(l))
+	return n
+}
+
+func (m *ImageImportStatus) Size() (n int) {
+	if m == nil {
+		return 0
+	}
+	var l int
+	_ = l
+	l = m.Status.Size()
+	n += 1 + l + sovGenerated(uint64(l))
+	if m.Image != nil {
+		l = m.Image.Size()
+		n += 1 + l + sovGenerated(uint64(l))
+	}
+	l = len(m.Tag)
+	n += 1 + l + sovGenerated(uint64(l))
+	return n
+}
+
+func (m *ImageLayer) Size() (n int) {
+	if m == nil {
+		return 0
+	}
+	var l int
+	_ = l
+	l = len(m.Name)
+	n += 1 + l + sovGenerated(uint64(l))
+	n += 1 + sovGenerated(uint64(m.LayerSize))
+	l = len(m.MediaType)
+	n += 1 + l + sovGenerated(uint64(l))
+	return n
+}
+
+func (m *ImageLayerData) Size() (n int) {
+	if m == nil {
+		return 0
+	}
+	var l int
+	_ = l
+	if m.LayerSize != nil {
+		n += 1 + sovGenerated(uint64(*m.LayerSize))
+	}
+	l = len(m.MediaType)
+	n += 1 + l + sovGenerated(uint64(l))
+	return n
+}
+
+func (m *ImageList) Size() (n int) {
+	if m == nil {
+		return 0
+	}
+	var l int
+	_ = l
+	l = m.ListMeta.Size()
+	n += 1 + l + sovGenerated(uint64(l))
+	if len(m.Items) > 0 {
+		for _, e := range m.Items {
+			l = e.Size()
+			n += 1 + l + sovGenerated(uint64(l))
+		}
+	}
+	return n
+}
+
+func (m *ImageLookupPolicy) Size() (n int) {
+	if m == nil {
+		return 0
+	}
+	var l int
+	_ = l
+	n += 2
+	return n
+}
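// ---- editor's aside (illustrative sketch, not part of the vendored patch) --
// Every Size() method above prices a length-delimited protobuf field as
// 1 (tag byte) + l (payload bytes) + sovGenerated(uint64(l)) (length varint),
// and a bool field as a flat n += 2 (tag byte + one value byte). sovGenerated,
// defined a little further down in this file, is the byte count of a base-128
// varint. A minimal standalone check of that formula, using only the standard
// library:
//
//	package main
//
//	import (
//		"fmt"
//		"math/bits"
//	)
//
//	// sov mirrors the generated sovGenerated: ceil(bitlen(x)/7), with x|1 so
//	// that zero still occupies one byte.
//	func sov(x uint64) int { return (bits.Len64(x|1) + 6) / 7 }
//
//	func main() {
//		fmt.Println(sov(0), sov(127), sov(128), sov(1<<63)) // 1 1 2 10
//	}
// ----------------------------------------------------------------------------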
+ +func (m *ImageSignature) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.ObjectMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Type) + n += 1 + l + sovGenerated(uint64(l)) + if m.Content != nil { + l = len(m.Content) + n += 1 + l + sovGenerated(uint64(l)) + } + if len(m.Conditions) > 0 { + for _, e := range m.Conditions { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + l = len(m.ImageIdentity) + n += 1 + l + sovGenerated(uint64(l)) + if len(m.SignedClaims) > 0 { + for k, v := range m.SignedClaims { + _ = k + _ = v + mapEntrySize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + len(v) + sovGenerated(uint64(len(v))) + n += mapEntrySize + 1 + sovGenerated(uint64(mapEntrySize)) + } + } + if m.Created != nil { + l = m.Created.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if m.IssuedBy != nil { + l = m.IssuedBy.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if m.IssuedTo != nil { + l = m.IssuedTo.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + return n +} + +func (m *ImageStream) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.ObjectMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = m.Spec.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = m.Status.Size() + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *ImageStreamImage) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.ObjectMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = m.Image.Size() + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *ImageStreamImport) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.ObjectMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = m.Spec.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = m.Status.Size() + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *ImageStreamImportSpec) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + n += 2 + if m.Repository != nil { + l = m.Repository.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if len(m.Images) > 0 { + for _, e := range m.Images { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m *ImageStreamImportStatus) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Import != nil { + l = m.Import.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if m.Repository != nil { + l = m.Repository.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if len(m.Images) > 0 { + for _, e := range m.Images { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m *ImageStreamLayers) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.ObjectMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + if len(m.Blobs) > 0 { + for k, v := range m.Blobs { + _ = k + _ = v + l = v.Size() + mapEntrySize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + l + sovGenerated(uint64(l)) + n += mapEntrySize + 1 + sovGenerated(uint64(mapEntrySize)) + } + } + if len(m.Images) > 0 { + for k, v := range m.Images { + _ = k + _ = v + l = v.Size() + mapEntrySize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + l + sovGenerated(uint64(l)) + n += mapEntrySize + 1 + sovGenerated(uint64(mapEntrySize)) + } + } + return n +} + +func (m *ImageStreamList) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.ListMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + if len(m.Items) > 0 { + for _, e := range m.Items { + l = e.Size() + n += 1 + l + 
sovGenerated(uint64(l)) + } + } + return n +} + +func (m *ImageStreamMapping) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.ObjectMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = m.Image.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Tag) + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *ImageStreamSpec) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.DockerImageRepository) + n += 1 + l + sovGenerated(uint64(l)) + if len(m.Tags) > 0 { + for _, e := range m.Tags { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + l = m.LookupPolicy.Size() + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *ImageStreamStatus) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.DockerImageRepository) + n += 1 + l + sovGenerated(uint64(l)) + if len(m.Tags) > 0 { + for _, e := range m.Tags { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + l = len(m.PublicDockerImageRepository) + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *ImageStreamTag) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.ObjectMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + if m.Tag != nil { + l = m.Tag.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + n += 1 + sovGenerated(uint64(m.Generation)) + if len(m.Conditions) > 0 { + for _, e := range m.Conditions { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + l = m.Image.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = m.LookupPolicy.Size() + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *ImageStreamTagList) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.ListMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + if len(m.Items) > 0 { + for _, e := range m.Items { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m *ImageTag) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.ObjectMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + if m.Spec != nil { + l = m.Spec.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if m.Status != nil { + l = m.Status.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if m.Image != nil { + l = m.Image.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + return n +} + +func (m *ImageTagList) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.ListMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + if len(m.Items) > 0 { + for _, e := range m.Items { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m *NamedTagEventList) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Tag) + n += 1 + l + sovGenerated(uint64(l)) + if len(m.Items) > 0 { + for _, e := range m.Items { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + if len(m.Conditions) > 0 { + for _, e := range m.Conditions { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m *RepositoryImportSpec) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.From.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = m.ImportPolicy.Size() + n += 1 + l + sovGenerated(uint64(l)) + n += 2 + l = m.ReferencePolicy.Size() + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *RepositoryImportStatus) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.Status.Size() + n += 1 + l + sovGenerated(uint64(l)) + if 
len(m.Images) > 0 { + for _, e := range m.Images { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + if len(m.AdditionalTags) > 0 { + for _, s := range m.AdditionalTags { + l = len(s) + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m *SignatureCondition) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Type) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Status) + n += 1 + l + sovGenerated(uint64(l)) + l = m.LastProbeTime.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = m.LastTransitionTime.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Reason) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Message) + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *SignatureGenericEntity) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Organization) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.CommonName) + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *SignatureIssuer) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.SignatureGenericEntity.Size() + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *SignatureSubject) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.SignatureGenericEntity.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.PublicKeyID) + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *TagEvent) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.Created.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.DockerImageReference) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Image) + n += 1 + l + sovGenerated(uint64(l)) + n += 1 + sovGenerated(uint64(m.Generation)) + return n +} + +func (m *TagEventCondition) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Type) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Status) + n += 1 + l + sovGenerated(uint64(l)) + l = m.LastTransitionTime.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Reason) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Message) + n += 1 + l + sovGenerated(uint64(l)) + n += 1 + sovGenerated(uint64(m.Generation)) + return n +} + +func (m *TagImportPolicy) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + n += 2 + n += 2 + return n +} + +func (m *TagReference) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Name) + n += 1 + l + sovGenerated(uint64(l)) + if len(m.Annotations) > 0 { + for k, v := range m.Annotations { + _ = k + _ = v + mapEntrySize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + len(v) + sovGenerated(uint64(len(v))) + n += mapEntrySize + 1 + sovGenerated(uint64(mapEntrySize)) + } + } + if m.From != nil { + l = m.From.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + n += 2 + if m.Generation != nil { + n += 1 + sovGenerated(uint64(*m.Generation)) + } + l = m.ImportPolicy.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = m.ReferencePolicy.Size() + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *TagReferencePolicy) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Type) + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func sovGenerated(x uint64) (n int) { + return (math_bits.Len64(x|1) + 6) / 7 +} +func sozGenerated(x uint64) (n int) { + return sovGenerated(uint64((x << 1) ^ uint64((int64(x) >> 63)))) +} +func (this *DockerImageReference) String() string { + if this == nil { + return "nil" 
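// ---- editor's aside (illustrative sketch, not part of the vendored patch) --
// sozGenerated above sizes sint fields by first zig-zag encoding them, so that
// negative values of small magnitude stay short on the wire: (x<<1)^(x>>63)
// maps 0, -1, 1, -2, 2, ... onto 0, 1, 2, 3, 4, ... A quick check, using only
// the standard library:
//
//	package main
//
//	import "fmt"
//
//	func zigzag(x int64) uint64 { return uint64((x << 1) ^ (x >> 63)) }
//
//	func main() {
//		for _, x := range []int64{0, -1, 1, -2, 2} {
//			fmt.Printf("%d -> %d\n", x, zigzag(x)) // 0->0, -1->1, 1->2, -2->3, 2->4
//		}
//	}
// ----------------------------------------------------------------------------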
+ } + s := strings.Join([]string{`&DockerImageReference{`, + `Registry:` + fmt.Sprintf("%v", this.Registry) + `,`, + `Namespace:` + fmt.Sprintf("%v", this.Namespace) + `,`, + `Name:` + fmt.Sprintf("%v", this.Name) + `,`, + `Tag:` + fmt.Sprintf("%v", this.Tag) + `,`, + `ID:` + fmt.Sprintf("%v", this.ID) + `,`, + `}`, + }, "") + return s +} +func (this *Image) String() string { + if this == nil { + return "nil" + } + repeatedStringForDockerImageLayers := "[]ImageLayer{" + for _, f := range this.DockerImageLayers { + repeatedStringForDockerImageLayers += strings.Replace(strings.Replace(f.String(), "ImageLayer", "ImageLayer", 1), `&`, ``, 1) + "," + } + repeatedStringForDockerImageLayers += "}" + repeatedStringForSignatures := "[]ImageSignature{" + for _, f := range this.Signatures { + repeatedStringForSignatures += strings.Replace(strings.Replace(f.String(), "ImageSignature", "ImageSignature", 1), `&`, ``, 1) + "," + } + repeatedStringForSignatures += "}" + s := strings.Join([]string{`&Image{`, + `ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v1.ObjectMeta", 1), `&`, ``, 1) + `,`, + `DockerImageReference:` + fmt.Sprintf("%v", this.DockerImageReference) + `,`, + `DockerImageMetadata:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.DockerImageMetadata), "RawExtension", "runtime.RawExtension", 1), `&`, ``, 1) + `,`, + `DockerImageMetadataVersion:` + fmt.Sprintf("%v", this.DockerImageMetadataVersion) + `,`, + `DockerImageManifest:` + fmt.Sprintf("%v", this.DockerImageManifest) + `,`, + `DockerImageLayers:` + repeatedStringForDockerImageLayers + `,`, + `Signatures:` + repeatedStringForSignatures + `,`, + `DockerImageSignatures:` + fmt.Sprintf("%v", this.DockerImageSignatures) + `,`, + `DockerImageManifestMediaType:` + fmt.Sprintf("%v", this.DockerImageManifestMediaType) + `,`, + `DockerImageConfig:` + fmt.Sprintf("%v", this.DockerImageConfig) + `,`, + `}`, + }, "") + return s +} +func (this *ImageBlobReferences) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&ImageBlobReferences{`, + `Layers:` + fmt.Sprintf("%v", this.Layers) + `,`, + `Config:` + valueToStringGenerated(this.Config) + `,`, + `ImageMissing:` + fmt.Sprintf("%v", this.ImageMissing) + `,`, + `}`, + }, "") + return s +} +func (this *ImageImportSpec) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&ImageImportSpec{`, + `From:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.From), "ObjectReference", "v11.ObjectReference", 1), `&`, ``, 1) + `,`, + `To:` + strings.Replace(fmt.Sprintf("%v", this.To), "LocalObjectReference", "v11.LocalObjectReference", 1) + `,`, + `ImportPolicy:` + strings.Replace(strings.Replace(this.ImportPolicy.String(), "TagImportPolicy", "TagImportPolicy", 1), `&`, ``, 1) + `,`, + `IncludeManifest:` + fmt.Sprintf("%v", this.IncludeManifest) + `,`, + `ReferencePolicy:` + strings.Replace(strings.Replace(this.ReferencePolicy.String(), "TagReferencePolicy", "TagReferencePolicy", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *ImageImportStatus) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&ImageImportStatus{`, + `Status:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Status), "Status", "v1.Status", 1), `&`, ``, 1) + `,`, + `Image:` + strings.Replace(this.Image.String(), "Image", "Image", 1) + `,`, + `Tag:` + fmt.Sprintf("%v", this.Tag) + `,`, + `}`, + }, "") + return s +} +func (this *ImageLayer) String() 
string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&ImageLayer{`, + `Name:` + fmt.Sprintf("%v", this.Name) + `,`, + `LayerSize:` + fmt.Sprintf("%v", this.LayerSize) + `,`, + `MediaType:` + fmt.Sprintf("%v", this.MediaType) + `,`, + `}`, + }, "") + return s +} +func (this *ImageLayerData) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&ImageLayerData{`, + `LayerSize:` + valueToStringGenerated(this.LayerSize) + `,`, + `MediaType:` + fmt.Sprintf("%v", this.MediaType) + `,`, + `}`, + }, "") + return s +} +func (this *ImageList) String() string { + if this == nil { + return "nil" + } + repeatedStringForItems := "[]Image{" + for _, f := range this.Items { + repeatedStringForItems += strings.Replace(strings.Replace(f.String(), "Image", "Image", 1), `&`, ``, 1) + "," + } + repeatedStringForItems += "}" + s := strings.Join([]string{`&ImageList{`, + `ListMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ListMeta), "ListMeta", "v1.ListMeta", 1), `&`, ``, 1) + `,`, + `Items:` + repeatedStringForItems + `,`, + `}`, + }, "") + return s +} +func (this *ImageLookupPolicy) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&ImageLookupPolicy{`, + `Local:` + fmt.Sprintf("%v", this.Local) + `,`, + `}`, + }, "") + return s +} +func (this *ImageSignature) String() string { + if this == nil { + return "nil" + } + repeatedStringForConditions := "[]SignatureCondition{" + for _, f := range this.Conditions { + repeatedStringForConditions += strings.Replace(strings.Replace(f.String(), "SignatureCondition", "SignatureCondition", 1), `&`, ``, 1) + "," + } + repeatedStringForConditions += "}" + keysForSignedClaims := make([]string, 0, len(this.SignedClaims)) + for k := range this.SignedClaims { + keysForSignedClaims = append(keysForSignedClaims, k) + } + github_com_gogo_protobuf_sortkeys.Strings(keysForSignedClaims) + mapStringForSignedClaims := "map[string]string{" + for _, k := range keysForSignedClaims { + mapStringForSignedClaims += fmt.Sprintf("%v: %v,", k, this.SignedClaims[k]) + } + mapStringForSignedClaims += "}" + s := strings.Join([]string{`&ImageSignature{`, + `ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v1.ObjectMeta", 1), `&`, ``, 1) + `,`, + `Type:` + fmt.Sprintf("%v", this.Type) + `,`, + `Content:` + valueToStringGenerated(this.Content) + `,`, + `Conditions:` + repeatedStringForConditions + `,`, + `ImageIdentity:` + fmt.Sprintf("%v", this.ImageIdentity) + `,`, + `SignedClaims:` + mapStringForSignedClaims + `,`, + `Created:` + strings.Replace(fmt.Sprintf("%v", this.Created), "Time", "v1.Time", 1) + `,`, + `IssuedBy:` + strings.Replace(this.IssuedBy.String(), "SignatureIssuer", "SignatureIssuer", 1) + `,`, + `IssuedTo:` + strings.Replace(this.IssuedTo.String(), "SignatureSubject", "SignatureSubject", 1) + `,`, + `}`, + }, "") + return s +} +func (this *ImageStream) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&ImageStream{`, + `ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v1.ObjectMeta", 1), `&`, ``, 1) + `,`, + `Spec:` + strings.Replace(strings.Replace(this.Spec.String(), "ImageStreamSpec", "ImageStreamSpec", 1), `&`, ``, 1) + `,`, + `Status:` + strings.Replace(strings.Replace(this.Status.String(), "ImageStreamStatus", "ImageStreamStatus", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *ImageStreamImage) String() string { + if 
this == nil { + return "nil" + } + s := strings.Join([]string{`&ImageStreamImage{`, + `ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v1.ObjectMeta", 1), `&`, ``, 1) + `,`, + `Image:` + strings.Replace(strings.Replace(this.Image.String(), "Image", "Image", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *ImageStreamImport) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&ImageStreamImport{`, + `ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v1.ObjectMeta", 1), `&`, ``, 1) + `,`, + `Spec:` + strings.Replace(strings.Replace(this.Spec.String(), "ImageStreamImportSpec", "ImageStreamImportSpec", 1), `&`, ``, 1) + `,`, + `Status:` + strings.Replace(strings.Replace(this.Status.String(), "ImageStreamImportStatus", "ImageStreamImportStatus", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *ImageStreamImportSpec) String() string { + if this == nil { + return "nil" + } + repeatedStringForImages := "[]ImageImportSpec{" + for _, f := range this.Images { + repeatedStringForImages += strings.Replace(strings.Replace(f.String(), "ImageImportSpec", "ImageImportSpec", 1), `&`, ``, 1) + "," + } + repeatedStringForImages += "}" + s := strings.Join([]string{`&ImageStreamImportSpec{`, + `Import:` + fmt.Sprintf("%v", this.Import) + `,`, + `Repository:` + strings.Replace(this.Repository.String(), "RepositoryImportSpec", "RepositoryImportSpec", 1) + `,`, + `Images:` + repeatedStringForImages + `,`, + `}`, + }, "") + return s +} +func (this *ImageStreamImportStatus) String() string { + if this == nil { + return "nil" + } + repeatedStringForImages := "[]ImageImportStatus{" + for _, f := range this.Images { + repeatedStringForImages += strings.Replace(strings.Replace(f.String(), "ImageImportStatus", "ImageImportStatus", 1), `&`, ``, 1) + "," + } + repeatedStringForImages += "}" + s := strings.Join([]string{`&ImageStreamImportStatus{`, + `Import:` + strings.Replace(this.Import.String(), "ImageStream", "ImageStream", 1) + `,`, + `Repository:` + strings.Replace(this.Repository.String(), "RepositoryImportStatus", "RepositoryImportStatus", 1) + `,`, + `Images:` + repeatedStringForImages + `,`, + `}`, + }, "") + return s +} +func (this *ImageStreamLayers) String() string { + if this == nil { + return "nil" + } + keysForBlobs := make([]string, 0, len(this.Blobs)) + for k := range this.Blobs { + keysForBlobs = append(keysForBlobs, k) + } + github_com_gogo_protobuf_sortkeys.Strings(keysForBlobs) + mapStringForBlobs := "map[string]ImageLayerData{" + for _, k := range keysForBlobs { + mapStringForBlobs += fmt.Sprintf("%v: %v,", k, this.Blobs[k]) + } + mapStringForBlobs += "}" + keysForImages := make([]string, 0, len(this.Images)) + for k := range this.Images { + keysForImages = append(keysForImages, k) + } + github_com_gogo_protobuf_sortkeys.Strings(keysForImages) + mapStringForImages := "map[string]ImageBlobReferences{" + for _, k := range keysForImages { + mapStringForImages += fmt.Sprintf("%v: %v,", k, this.Images[k]) + } + mapStringForImages += "}" + s := strings.Join([]string{`&ImageStreamLayers{`, + `ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v1.ObjectMeta", 1), `&`, ``, 1) + `,`, + `Blobs:` + mapStringForBlobs + `,`, + `Images:` + mapStringForImages + `,`, + `}`, + }, "") + return s +} +func (this *ImageStreamList) String() string { + if this == nil { + return "nil" + } + repeatedStringForItems := 
"[]ImageStream{" + for _, f := range this.Items { + repeatedStringForItems += strings.Replace(strings.Replace(f.String(), "ImageStream", "ImageStream", 1), `&`, ``, 1) + "," + } + repeatedStringForItems += "}" + s := strings.Join([]string{`&ImageStreamList{`, + `ListMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ListMeta), "ListMeta", "v1.ListMeta", 1), `&`, ``, 1) + `,`, + `Items:` + repeatedStringForItems + `,`, + `}`, + }, "") + return s +} +func (this *ImageStreamMapping) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&ImageStreamMapping{`, + `ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v1.ObjectMeta", 1), `&`, ``, 1) + `,`, + `Image:` + strings.Replace(strings.Replace(this.Image.String(), "Image", "Image", 1), `&`, ``, 1) + `,`, + `Tag:` + fmt.Sprintf("%v", this.Tag) + `,`, + `}`, + }, "") + return s +} +func (this *ImageStreamSpec) String() string { + if this == nil { + return "nil" + } + repeatedStringForTags := "[]TagReference{" + for _, f := range this.Tags { + repeatedStringForTags += strings.Replace(strings.Replace(f.String(), "TagReference", "TagReference", 1), `&`, ``, 1) + "," + } + repeatedStringForTags += "}" + s := strings.Join([]string{`&ImageStreamSpec{`, + `DockerImageRepository:` + fmt.Sprintf("%v", this.DockerImageRepository) + `,`, + `Tags:` + repeatedStringForTags + `,`, + `LookupPolicy:` + strings.Replace(strings.Replace(this.LookupPolicy.String(), "ImageLookupPolicy", "ImageLookupPolicy", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *ImageStreamStatus) String() string { + if this == nil { + return "nil" + } + repeatedStringForTags := "[]NamedTagEventList{" + for _, f := range this.Tags { + repeatedStringForTags += strings.Replace(strings.Replace(f.String(), "NamedTagEventList", "NamedTagEventList", 1), `&`, ``, 1) + "," + } + repeatedStringForTags += "}" + s := strings.Join([]string{`&ImageStreamStatus{`, + `DockerImageRepository:` + fmt.Sprintf("%v", this.DockerImageRepository) + `,`, + `Tags:` + repeatedStringForTags + `,`, + `PublicDockerImageRepository:` + fmt.Sprintf("%v", this.PublicDockerImageRepository) + `,`, + `}`, + }, "") + return s +} +func (this *ImageStreamTag) String() string { + if this == nil { + return "nil" + } + repeatedStringForConditions := "[]TagEventCondition{" + for _, f := range this.Conditions { + repeatedStringForConditions += strings.Replace(strings.Replace(f.String(), "TagEventCondition", "TagEventCondition", 1), `&`, ``, 1) + "," + } + repeatedStringForConditions += "}" + s := strings.Join([]string{`&ImageStreamTag{`, + `ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v1.ObjectMeta", 1), `&`, ``, 1) + `,`, + `Tag:` + strings.Replace(this.Tag.String(), "TagReference", "TagReference", 1) + `,`, + `Generation:` + fmt.Sprintf("%v", this.Generation) + `,`, + `Conditions:` + repeatedStringForConditions + `,`, + `Image:` + strings.Replace(strings.Replace(this.Image.String(), "Image", "Image", 1), `&`, ``, 1) + `,`, + `LookupPolicy:` + strings.Replace(strings.Replace(this.LookupPolicy.String(), "ImageLookupPolicy", "ImageLookupPolicy", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *ImageStreamTagList) String() string { + if this == nil { + return "nil" + } + repeatedStringForItems := "[]ImageStreamTag{" + for _, f := range this.Items { + repeatedStringForItems += strings.Replace(strings.Replace(f.String(), "ImageStreamTag", "ImageStreamTag", 1), 
`&`, ``, 1) + "," + } + repeatedStringForItems += "}" + s := strings.Join([]string{`&ImageStreamTagList{`, + `ListMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ListMeta), "ListMeta", "v1.ListMeta", 1), `&`, ``, 1) + `,`, + `Items:` + repeatedStringForItems + `,`, + `}`, + }, "") + return s +} +func (this *ImageTag) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&ImageTag{`, + `ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v1.ObjectMeta", 1), `&`, ``, 1) + `,`, + `Spec:` + strings.Replace(this.Spec.String(), "TagReference", "TagReference", 1) + `,`, + `Status:` + strings.Replace(this.Status.String(), "NamedTagEventList", "NamedTagEventList", 1) + `,`, + `Image:` + strings.Replace(this.Image.String(), "Image", "Image", 1) + `,`, + `}`, + }, "") + return s +} +func (this *ImageTagList) String() string { + if this == nil { + return "nil" + } + repeatedStringForItems := "[]ImageTag{" + for _, f := range this.Items { + repeatedStringForItems += strings.Replace(strings.Replace(f.String(), "ImageTag", "ImageTag", 1), `&`, ``, 1) + "," + } + repeatedStringForItems += "}" + s := strings.Join([]string{`&ImageTagList{`, + `ListMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ListMeta), "ListMeta", "v1.ListMeta", 1), `&`, ``, 1) + `,`, + `Items:` + repeatedStringForItems + `,`, + `}`, + }, "") + return s +} +func (this *NamedTagEventList) String() string { + if this == nil { + return "nil" + } + repeatedStringForItems := "[]TagEvent{" + for _, f := range this.Items { + repeatedStringForItems += strings.Replace(strings.Replace(f.String(), "TagEvent", "TagEvent", 1), `&`, ``, 1) + "," + } + repeatedStringForItems += "}" + repeatedStringForConditions := "[]TagEventCondition{" + for _, f := range this.Conditions { + repeatedStringForConditions += strings.Replace(strings.Replace(f.String(), "TagEventCondition", "TagEventCondition", 1), `&`, ``, 1) + "," + } + repeatedStringForConditions += "}" + s := strings.Join([]string{`&NamedTagEventList{`, + `Tag:` + fmt.Sprintf("%v", this.Tag) + `,`, + `Items:` + repeatedStringForItems + `,`, + `Conditions:` + repeatedStringForConditions + `,`, + `}`, + }, "") + return s +} +func (this *RepositoryImportSpec) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&RepositoryImportSpec{`, + `From:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.From), "ObjectReference", "v11.ObjectReference", 1), `&`, ``, 1) + `,`, + `ImportPolicy:` + strings.Replace(strings.Replace(this.ImportPolicy.String(), "TagImportPolicy", "TagImportPolicy", 1), `&`, ``, 1) + `,`, + `IncludeManifest:` + fmt.Sprintf("%v", this.IncludeManifest) + `,`, + `ReferencePolicy:` + strings.Replace(strings.Replace(this.ReferencePolicy.String(), "TagReferencePolicy", "TagReferencePolicy", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *RepositoryImportStatus) String() string { + if this == nil { + return "nil" + } + repeatedStringForImages := "[]ImageImportStatus{" + for _, f := range this.Images { + repeatedStringForImages += strings.Replace(strings.Replace(f.String(), "ImageImportStatus", "ImageImportStatus", 1), `&`, ``, 1) + "," + } + repeatedStringForImages += "}" + s := strings.Join([]string{`&RepositoryImportStatus{`, + `Status:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Status), "Status", "v1.Status", 1), `&`, ``, 1) + `,`, + `Images:` + repeatedStringForImages + `,`, + `AdditionalTags:` + fmt.Sprintf("%v", 
this.AdditionalTags) + `,`, + `}`, + }, "") + return s +} +func (this *SignatureCondition) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&SignatureCondition{`, + `Type:` + fmt.Sprintf("%v", this.Type) + `,`, + `Status:` + fmt.Sprintf("%v", this.Status) + `,`, + `LastProbeTime:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.LastProbeTime), "Time", "v1.Time", 1), `&`, ``, 1) + `,`, + `LastTransitionTime:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.LastTransitionTime), "Time", "v1.Time", 1), `&`, ``, 1) + `,`, + `Reason:` + fmt.Sprintf("%v", this.Reason) + `,`, + `Message:` + fmt.Sprintf("%v", this.Message) + `,`, + `}`, + }, "") + return s +} +func (this *SignatureGenericEntity) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&SignatureGenericEntity{`, + `Organization:` + fmt.Sprintf("%v", this.Organization) + `,`, + `CommonName:` + fmt.Sprintf("%v", this.CommonName) + `,`, + `}`, + }, "") + return s +} +func (this *SignatureIssuer) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&SignatureIssuer{`, + `SignatureGenericEntity:` + strings.Replace(strings.Replace(this.SignatureGenericEntity.String(), "SignatureGenericEntity", "SignatureGenericEntity", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *SignatureSubject) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&SignatureSubject{`, + `SignatureGenericEntity:` + strings.Replace(strings.Replace(this.SignatureGenericEntity.String(), "SignatureGenericEntity", "SignatureGenericEntity", 1), `&`, ``, 1) + `,`, + `PublicKeyID:` + fmt.Sprintf("%v", this.PublicKeyID) + `,`, + `}`, + }, "") + return s +} +func (this *TagEvent) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&TagEvent{`, + `Created:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Created), "Time", "v1.Time", 1), `&`, ``, 1) + `,`, + `DockerImageReference:` + fmt.Sprintf("%v", this.DockerImageReference) + `,`, + `Image:` + fmt.Sprintf("%v", this.Image) + `,`, + `Generation:` + fmt.Sprintf("%v", this.Generation) + `,`, + `}`, + }, "") + return s +} +func (this *TagEventCondition) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&TagEventCondition{`, + `Type:` + fmt.Sprintf("%v", this.Type) + `,`, + `Status:` + fmt.Sprintf("%v", this.Status) + `,`, + `LastTransitionTime:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.LastTransitionTime), "Time", "v1.Time", 1), `&`, ``, 1) + `,`, + `Reason:` + fmt.Sprintf("%v", this.Reason) + `,`, + `Message:` + fmt.Sprintf("%v", this.Message) + `,`, + `Generation:` + fmt.Sprintf("%v", this.Generation) + `,`, + `}`, + }, "") + return s +} +func (this *TagImportPolicy) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&TagImportPolicy{`, + `Insecure:` + fmt.Sprintf("%v", this.Insecure) + `,`, + `Scheduled:` + fmt.Sprintf("%v", this.Scheduled) + `,`, + `}`, + }, "") + return s +} +func (this *TagReference) String() string { + if this == nil { + return "nil" + } + keysForAnnotations := make([]string, 0, len(this.Annotations)) + for k := range this.Annotations { + keysForAnnotations = append(keysForAnnotations, k) + } + github_com_gogo_protobuf_sortkeys.Strings(keysForAnnotations) + mapStringForAnnotations := "map[string]string{" + for _, k := range keysForAnnotations { + mapStringForAnnotations += fmt.Sprintf("%v: %v,", k, 
this.Annotations[k]) + } + mapStringForAnnotations += "}" + s := strings.Join([]string{`&TagReference{`, + `Name:` + fmt.Sprintf("%v", this.Name) + `,`, + `Annotations:` + mapStringForAnnotations + `,`, + `From:` + strings.Replace(fmt.Sprintf("%v", this.From), "ObjectReference", "v11.ObjectReference", 1) + `,`, + `Reference:` + fmt.Sprintf("%v", this.Reference) + `,`, + `Generation:` + valueToStringGenerated(this.Generation) + `,`, + `ImportPolicy:` + strings.Replace(strings.Replace(this.ImportPolicy.String(), "TagImportPolicy", "TagImportPolicy", 1), `&`, ``, 1) + `,`, + `ReferencePolicy:` + strings.Replace(strings.Replace(this.ReferencePolicy.String(), "TagReferencePolicy", "TagReferencePolicy", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *TagReferencePolicy) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&TagReferencePolicy{`, + `Type:` + fmt.Sprintf("%v", this.Type) + `,`, + `}`, + }, "") + return s +} +func valueToStringGenerated(v interface{}) string { + rv := reflect.ValueOf(v) + if rv.IsNil() { + return "nil" + } + pv := reflect.Indirect(rv).Interface() + return fmt.Sprintf("*%v", pv) +} +func (m *DockerImageReference) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: DockerImageReference: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: DockerImageReference: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Registry", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Registry = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Namespace", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Namespace = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := 
dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Name = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Tag", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Tag = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ID", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ID = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *Image) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Image: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Image: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ObjectMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return 
fmt.Errorf("proto: wrong wireType = %d for field DockerImageReference", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.DockerImageReference = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field DockerImageMetadata", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.DockerImageMetadata.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field DockerImageMetadataVersion", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.DockerImageMetadataVersion = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field DockerImageManifest", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.DockerImageManifest = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 6: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field DockerImageLayers", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.DockerImageLayers = append(m.DockerImageLayers, ImageLayer{}) + if err := 
m.DockerImageLayers[len(m.DockerImageLayers)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 7: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Signatures", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Signatures = append(m.Signatures, ImageSignature{}) + if err := m.Signatures[len(m.Signatures)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 8: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field DockerImageSignatures", wireType) + } + var byteLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + byteLen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if byteLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + byteLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.DockerImageSignatures = append(m.DockerImageSignatures, make([]byte, postIndex-iNdEx)) + copy(m.DockerImageSignatures[len(m.DockerImageSignatures)-1], dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 9: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field DockerImageManifestMediaType", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.DockerImageManifestMediaType = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 10: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field DockerImageConfig", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.DockerImageConfig = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ImageBlobReferences) 
Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ImageBlobReferences: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ImageBlobReferences: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Layers", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Layers = append(m.Layers, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Config", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + s := string(dAtA[iNdEx:postIndex]) + m.Config = &s + iNdEx = postIndex + case 3: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field ImageMissing", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.ImageMissing = bool(v != 0) + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ImageImportSpec) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ImageImportSpec: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ImageImportSpec: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + 
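// ---- editor's aside (illustrative sketch, not part of the vendored patch) --
// Each generated Unmarshal follows the same loop: read one varint key ("wire"),
// then split it into fieldNum = wire >> 3 and wireType = wire & 0x7 (0 means
// varint, 2 means length-delimited). A tiny sketch of that key decoding,
// assuming only the standard library:
//
//	package main
//
//	import (
//		"encoding/binary"
//		"fmt"
//	)
//
//	func main() {
//		// (3<<3)|2 = 0x1a: field 3, wire type 2 -- the same tag byte the
//		// TagReference marshaller above writes for its From field.
//		wire, _ := binary.Uvarint([]byte{0x1a})
//		fmt.Println("field:", wire>>3, "type:", wire&0x7) // field: 3 type: 2
//	}
// ----------------------------------------------------------------------------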
case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field From", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.From.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field To", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.To == nil { + m.To = &v11.LocalObjectReference{} + } + if err := m.To.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ImportPolicy", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ImportPolicy.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 4: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field IncludeManifest", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.IncludeManifest = bool(v != 0) + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ReferencePolicy", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ReferencePolicy.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + 
return nil +} +func (m *ImageImportStatus) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ImageImportStatus: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ImageImportStatus: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Status", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Status.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Image", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Image == nil { + m.Image = &Image{} + } + if err := m.Image.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Tag", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Tag = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ImageLayer) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + 
+		fieldNum := int32(wire >> 3)
+		wireType := int(wire & 0x7)
+		if wireType == 4 {
+			return fmt.Errorf("proto: ImageLayer: wiretype end group for non-group")
+		}
+		if fieldNum <= 0 {
+			return fmt.Errorf("proto: ImageLayer: illegal tag %d (wire type %d)", fieldNum, wire)
+		}
+		switch fieldNum {
+		case 1:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType)
+			}
+			var stringLen uint64
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				stringLen |= uint64(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			intStringLen := int(stringLen)
+			if intStringLen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + intStringLen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.Name = string(dAtA[iNdEx:postIndex])
+			iNdEx = postIndex
+		case 2:
+			if wireType != 0 {
+				return fmt.Errorf("proto: wrong wireType = %d for field LayerSize", wireType)
+			}
+			m.LayerSize = 0
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				m.LayerSize |= int64(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+		case 3:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field MediaType", wireType)
+			}
+			var stringLen uint64
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				stringLen |= uint64(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			intStringLen := int(stringLen)
+			if intStringLen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + intStringLen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.MediaType = string(dAtA[iNdEx:postIndex])
+			iNdEx = postIndex
+		default:
+			iNdEx = preIndex
+			skippy, err := skipGenerated(dAtA[iNdEx:])
+			if err != nil {
+				return err
+			}
+			if skippy < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if (iNdEx + skippy) < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if (iNdEx + skippy) > l {
+				return io.ErrUnexpectedEOF
+			}
+			iNdEx += skippy
+		}
+	}
+
+	if iNdEx > l {
+		return io.ErrUnexpectedEOF
+	}
+	return nil
+}
+func (m *ImageLayerData) Unmarshal(dAtA []byte) error {
+	l := len(dAtA)
+	iNdEx := 0
+	for iNdEx < l {
+		preIndex := iNdEx
+		var wire uint64
+		for shift := uint(0); ; shift += 7 {
+			if shift >= 64 {
+				return ErrIntOverflowGenerated
+			}
+			if iNdEx >= l {
+				return io.ErrUnexpectedEOF
+			}
+			b := dAtA[iNdEx]
+			iNdEx++
+			wire |= uint64(b&0x7F) << shift
+			if b < 0x80 {
+				break
+			}
+		}
+		fieldNum := int32(wire >> 3)
+		wireType := int(wire & 0x7)
+		if wireType == 4 {
+			return fmt.Errorf("proto: ImageLayerData: wiretype end group for non-group")
+		}
+		if fieldNum <= 0 {
+			return fmt.Errorf("proto: ImageLayerData: illegal tag %d (wire type %d)", fieldNum, wire)
+		}
+		switch fieldNum {
+		case 1:
+			if wireType != 0 {
+				return fmt.Errorf("proto: wrong wireType = %d for field LayerSize", wireType)
+			}
+			var v int64
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				v |= int64(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			m.LayerSize = &v
+		case 2:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field MediaType", wireType)
+			}
+			var stringLen uint64
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				stringLen |= uint64(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			intStringLen := int(stringLen)
+			if intStringLen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + intStringLen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.MediaType = string(dAtA[iNdEx:postIndex])
+			iNdEx = postIndex
+		default:
+			iNdEx = preIndex
+			skippy, err := skipGenerated(dAtA[iNdEx:])
+			if err != nil {
+				return err
+			}
+			if skippy < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if (iNdEx + skippy) < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if (iNdEx + skippy) > l {
+				return io.ErrUnexpectedEOF
+			}
+			iNdEx += skippy
+		}
+	}
+
+	if iNdEx > l {
+		return io.ErrUnexpectedEOF
+	}
+	return nil
+}
+func (m *ImageList) Unmarshal(dAtA []byte) error {
+	l := len(dAtA)
+	iNdEx := 0
+	for iNdEx < l {
+		preIndex := iNdEx
+		var wire uint64
+		for shift := uint(0); ; shift += 7 {
+			if shift >= 64 {
+				return ErrIntOverflowGenerated
+			}
+			if iNdEx >= l {
+				return io.ErrUnexpectedEOF
+			}
+			b := dAtA[iNdEx]
+			iNdEx++
+			wire |= uint64(b&0x7F) << shift
+			if b < 0x80 {
+				break
+			}
+		}
+		fieldNum := int32(wire >> 3)
+		wireType := int(wire & 0x7)
+		if wireType == 4 {
+			return fmt.Errorf("proto: ImageList: wiretype end group for non-group")
+		}
+		if fieldNum <= 0 {
+			return fmt.Errorf("proto: ImageList: illegal tag %d (wire type %d)", fieldNum, wire)
+		}
+		switch fieldNum {
+		case 1:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field ListMeta", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			if err := m.ListMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			iNdEx = postIndex
+		case 2:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.Items = append(m.Items, Image{})
+			if err := m.Items[len(m.Items)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			iNdEx = postIndex
+		default:
+			iNdEx = preIndex
+			skippy, err := skipGenerated(dAtA[iNdEx:])
+			if err != nil {
+				return err
+			}
+			if skippy < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if (iNdEx + skippy) < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if (iNdEx + skippy) > l {
+				return io.ErrUnexpectedEOF
+			}
+			iNdEx += skippy
+		}
+	}
+
+	if iNdEx > l {
+		return io.ErrUnexpectedEOF
+	}
+	return nil
+}
+func (m *ImageLookupPolicy) Unmarshal(dAtA []byte) error {
+	l := len(dAtA)
+	iNdEx := 0
+	for iNdEx < l {
+		preIndex := iNdEx
+		var wire uint64
+		for shift := uint(0); ; shift += 7 {
+			if shift >= 64 {
+				return ErrIntOverflowGenerated
+			}
+			if iNdEx >= l {
+				return io.ErrUnexpectedEOF
+			}
+			b := dAtA[iNdEx]
+			iNdEx++
+			wire |= uint64(b&0x7F) << shift
+			if b < 0x80 {
+				break
+			}
+		}
+		fieldNum := int32(wire >> 3)
+		wireType := int(wire & 0x7)
+		if wireType == 4 {
+			return fmt.Errorf("proto: ImageLookupPolicy: wiretype end group for non-group")
+		}
+		if fieldNum <= 0 {
+			return fmt.Errorf("proto: ImageLookupPolicy: illegal tag %d (wire type %d)", fieldNum, wire)
+		}
+		switch fieldNum {
+		case 3:
+			if wireType != 0 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Local", wireType)
+			}
+			var v int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				v |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			m.Local = bool(v != 0)
+		default:
+			iNdEx = preIndex
+			skippy, err := skipGenerated(dAtA[iNdEx:])
+			if err != nil {
+				return err
+			}
+			if skippy < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if (iNdEx + skippy) < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if (iNdEx + skippy) > l {
+				return io.ErrUnexpectedEOF
+			}
+			iNdEx += skippy
+		}
+	}
+
+	if iNdEx > l {
+		return io.ErrUnexpectedEOF
+	}
+	return nil
+}
+func (m *ImageSignature) Unmarshal(dAtA []byte) error {
+	l := len(dAtA)
+	iNdEx := 0
+	for iNdEx < l {
+		preIndex := iNdEx
+		var wire uint64
+		for shift := uint(0); ; shift += 7 {
+			if shift >= 64 {
+				return ErrIntOverflowGenerated
+			}
+			if iNdEx >= l {
+				return io.ErrUnexpectedEOF
+			}
+			b := dAtA[iNdEx]
+			iNdEx++
+			wire |= uint64(b&0x7F) << shift
+			if b < 0x80 {
+				break
+			}
+		}
+		fieldNum := int32(wire >> 3)
+		wireType := int(wire & 0x7)
+		if wireType == 4 {
+			return fmt.Errorf("proto: ImageSignature: wiretype end group for non-group")
+		}
+		if fieldNum <= 0 {
+			return fmt.Errorf("proto: ImageSignature: illegal tag %d (wire type %d)", fieldNum, wire)
+		}
+		switch fieldNum {
+		case 1:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			if err := m.ObjectMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			iNdEx = postIndex
+		case 2:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Type", wireType)
+			}
+			var stringLen uint64
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				stringLen |= uint64(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			intStringLen := int(stringLen)
+			if intStringLen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + intStringLen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.Type = string(dAtA[iNdEx:postIndex])
+			iNdEx = postIndex
+		case 3:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Content", wireType)
+			}
+			var byteLen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				byteLen |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if byteLen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + byteLen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.Content = append(m.Content[:0], dAtA[iNdEx:postIndex]...)
+			if m.Content == nil {
+				m.Content = []byte{}
+			}
+			iNdEx = postIndex
+		case 4:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Conditions", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.Conditions = append(m.Conditions, SignatureCondition{})
+			if err := m.Conditions[len(m.Conditions)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			iNdEx = postIndex
+		case 5:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field ImageIdentity", wireType)
+			}
+			var stringLen uint64
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				stringLen |= uint64(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			intStringLen := int(stringLen)
+			if intStringLen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + intStringLen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.ImageIdentity = string(dAtA[iNdEx:postIndex])
+			iNdEx = postIndex
+		case 6:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field SignedClaims", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			if m.SignedClaims == nil {
+				m.SignedClaims = make(map[string]string)
+			}
+			var mapkey string
+			var mapvalue string
+			for iNdEx < postIndex {
+				entryPreIndex := iNdEx
+				var wire uint64
+				for shift := uint(0); ; shift += 7 {
+					if shift >= 64 {
+						return ErrIntOverflowGenerated
+					}
+					if iNdEx >= l {
+						return io.ErrUnexpectedEOF
+					}
+					b := dAtA[iNdEx]
+					iNdEx++
+					wire |= uint64(b&0x7F) << shift
+					if b < 0x80 {
+						break
+					}
+				}
+				fieldNum := int32(wire >> 3)
+				if fieldNum == 1 {
+					var stringLenmapkey uint64
+					for shift := uint(0); ; shift += 7 {
+						if shift >= 64 {
+							return ErrIntOverflowGenerated
+						}
+						if iNdEx >= l {
+							return io.ErrUnexpectedEOF
+						}
+						b := dAtA[iNdEx]
+						iNdEx++
+						stringLenmapkey |= uint64(b&0x7F) << shift
+						if b < 0x80 {
+							break
+						}
+					}
+					intStringLenmapkey := int(stringLenmapkey)
+					if intStringLenmapkey < 0 {
+						return ErrInvalidLengthGenerated
+					}
+					postStringIndexmapkey := iNdEx + intStringLenmapkey
+					if postStringIndexmapkey < 0 {
+						return ErrInvalidLengthGenerated
+					}
+					if postStringIndexmapkey > l {
+						return io.ErrUnexpectedEOF
+					}
+					mapkey = string(dAtA[iNdEx:postStringIndexmapkey])
+					iNdEx = postStringIndexmapkey
+				} else if fieldNum == 2 {
+					var stringLenmapvalue uint64
+					for shift := uint(0); ; shift += 7 {
+						if shift >= 64 {
+							return ErrIntOverflowGenerated
+						}
+						if iNdEx >= l {
+							return io.ErrUnexpectedEOF
+						}
+						b := dAtA[iNdEx]
+						iNdEx++
+						stringLenmapvalue |= uint64(b&0x7F) << shift
+						if b < 0x80 {
+							break
+						}
+					}
+					intStringLenmapvalue := int(stringLenmapvalue)
+					if intStringLenmapvalue < 0 {
+						return ErrInvalidLengthGenerated
+					}
+					postStringIndexmapvalue := iNdEx + intStringLenmapvalue
+					if postStringIndexmapvalue < 0 {
+						return ErrInvalidLengthGenerated
+					}
+					if postStringIndexmapvalue > l {
+						return io.ErrUnexpectedEOF
+					}
+					mapvalue = string(dAtA[iNdEx:postStringIndexmapvalue])
+					iNdEx = postStringIndexmapvalue
+				} else {
+					iNdEx = entryPreIndex
+					skippy, err := skipGenerated(dAtA[iNdEx:])
+					if err != nil {
+						return err
+					}
+					if skippy < 0 {
+						return ErrInvalidLengthGenerated
+					}
+					if (iNdEx + skippy) > postIndex {
+						return io.ErrUnexpectedEOF
+					}
+					iNdEx += skippy
+				}
+			}
+			m.SignedClaims[mapkey] = mapvalue
+			iNdEx = postIndex
+		case 7:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Created", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			if m.Created == nil {
+				m.Created = &v1.Time{}
+			}
+			if err := m.Created.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			iNdEx = postIndex
+		case 8:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field IssuedBy", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			if m.IssuedBy == nil {
+				m.IssuedBy = &SignatureIssuer{}
+			}
+			if err := m.IssuedBy.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			iNdEx = postIndex
+		case 9:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field IssuedTo", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			if m.IssuedTo == nil {
+				m.IssuedTo = &SignatureSubject{}
+			}
+			if err := m.IssuedTo.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			iNdEx = postIndex
+		default:
+			iNdEx = preIndex
+			skippy, err := skipGenerated(dAtA[iNdEx:])
+			if err != nil {
+				return err
+			}
+			if skippy < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if (iNdEx + skippy) < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if (iNdEx + skippy) > l {
+				return io.ErrUnexpectedEOF
+			}
+			iNdEx += skippy
+		}
+	}
+
+	if iNdEx > l {
+		return io.ErrUnexpectedEOF
+	}
+	return nil
+}
+func (m *ImageStream) Unmarshal(dAtA []byte) error {
+	l := len(dAtA)
+	iNdEx := 0
+	for iNdEx < l {
+		preIndex := iNdEx
+		var wire uint64
+		for shift := uint(0); ; shift += 7 {
+			if shift >= 64 {
+				return ErrIntOverflowGenerated
+			}
+			if iNdEx >= l {
+				return io.ErrUnexpectedEOF
+			}
+			b := dAtA[iNdEx]
+			iNdEx++
+			wire |= uint64(b&0x7F) << shift
+			if b < 0x80 {
+				break
+			}
+		}
+		fieldNum := int32(wire >> 3)
+		wireType := int(wire & 0x7)
+		if wireType == 4 {
+			return fmt.Errorf("proto: ImageStream: wiretype end group for non-group")
+		}
+		if fieldNum <= 0 {
+			return fmt.Errorf("proto: ImageStream: illegal tag %d (wire type %d)", fieldNum, wire)
+		}
+		switch fieldNum {
+		case 1:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			if err := m.ObjectMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			iNdEx = postIndex
+		case 2:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Spec", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			if err := m.Spec.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			iNdEx = postIndex
+		case 3:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Status", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			if err := m.Status.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			iNdEx = postIndex
+		default:
+			iNdEx = preIndex
+			skippy, err := skipGenerated(dAtA[iNdEx:])
+			if err != nil {
+				return err
+			}
+			if skippy < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if (iNdEx + skippy) < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if (iNdEx + skippy) > l {
+				return io.ErrUnexpectedEOF
+			}
+			iNdEx += skippy
+		}
+	}
+
+	if iNdEx > l {
+		return io.ErrUnexpectedEOF
+	}
+	return nil
+}
+func (m *ImageStreamImage) Unmarshal(dAtA []byte) error {
+	l := len(dAtA)
+	iNdEx := 0
+	for iNdEx < l {
+		preIndex := iNdEx
+		var wire uint64
+		for shift := uint(0); ; shift += 7 {
+			if shift >= 64 {
+				return ErrIntOverflowGenerated
+			}
+			if iNdEx >= l {
+				return io.ErrUnexpectedEOF
+			}
+			b := dAtA[iNdEx]
+			iNdEx++
+			wire |= uint64(b&0x7F) << shift
+			if b < 0x80 {
+				break
+			}
+		}
+		fieldNum := int32(wire >> 3)
+		wireType := int(wire & 0x7)
+		if wireType == 4 {
+			return fmt.Errorf("proto: ImageStreamImage: wiretype end group for non-group")
+		}
+		if fieldNum <= 0 {
+			return fmt.Errorf("proto: ImageStreamImage: illegal tag %d (wire type %d)", fieldNum, wire)
+		}
+		switch fieldNum {
+		case 1:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			if err := m.ObjectMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			iNdEx = postIndex
+		case 2:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Image", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			if err := m.Image.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			iNdEx = postIndex
+		default:
+			iNdEx = preIndex
+			skippy, err := skipGenerated(dAtA[iNdEx:])
+			if err != nil {
+				return err
+			}
+			if skippy < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if (iNdEx + skippy) < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if (iNdEx + skippy) > l {
+				return io.ErrUnexpectedEOF
+			}
+			iNdEx += skippy
+		}
+	}
+
+	if iNdEx > l {
+		return io.ErrUnexpectedEOF
+	}
+	return nil
+}
+func (m *ImageStreamImport) Unmarshal(dAtA []byte) error {
+	l := len(dAtA)
+	iNdEx := 0
+	for iNdEx < l {
+		preIndex := iNdEx
+		var wire uint64
+		for shift := uint(0); ; shift += 7 {
+			if shift >= 64 {
+				return ErrIntOverflowGenerated
+			}
+			if iNdEx >= l {
+				return io.ErrUnexpectedEOF
+			}
+			b := dAtA[iNdEx]
+			iNdEx++
+			wire |= uint64(b&0x7F) << shift
+			if b < 0x80 {
+				break
+			}
+		}
+		fieldNum := int32(wire >> 3)
+		wireType := int(wire & 0x7)
+		if wireType == 4 {
+			return fmt.Errorf("proto: ImageStreamImport: wiretype end group for non-group")
+		}
+		if fieldNum <= 0 {
+			return fmt.Errorf("proto: ImageStreamImport: illegal tag %d (wire type %d)", fieldNum, wire)
+		}
+		switch fieldNum {
+		case 1:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			if err := m.ObjectMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			iNdEx = postIndex
+		case 2:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Spec", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			if err := m.Spec.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			iNdEx = postIndex
+		case 3:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Status", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			if err := m.Status.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			iNdEx = postIndex
+		default:
+			iNdEx = preIndex
+			skippy, err := skipGenerated(dAtA[iNdEx:])
+			if err != nil {
+				return err
+			}
+			if skippy < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if (iNdEx + skippy) < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if (iNdEx + skippy) > l {
+				return io.ErrUnexpectedEOF
+			}
+			iNdEx += skippy
+		}
+	}
+
+	if iNdEx > l {
+		return io.ErrUnexpectedEOF
+	}
+	return nil
+}
+func (m *ImageStreamImportSpec) Unmarshal(dAtA []byte) error {
+	l := len(dAtA)
+	iNdEx := 0
+	for iNdEx < l {
+		preIndex := iNdEx
+		var wire uint64
+		for shift := uint(0); ; shift += 7 {
+			if shift >= 64 {
+				return ErrIntOverflowGenerated
+			}
+			if iNdEx >= l {
+				return io.ErrUnexpectedEOF
+			}
+			b := dAtA[iNdEx]
+			iNdEx++
+			wire |= uint64(b&0x7F) << shift
+			if b < 0x80 {
+				break
+			}
+		}
+		fieldNum := int32(wire >> 3)
+		wireType := int(wire & 0x7)
+		if wireType == 4 {
+			return fmt.Errorf("proto: ImageStreamImportSpec: wiretype end group for non-group")
+		}
+		if fieldNum <= 0 {
+			return fmt.Errorf("proto: ImageStreamImportSpec: illegal tag %d (wire type %d)", fieldNum, wire)
+		}
+		switch fieldNum {
+		case 1:
+			if wireType != 0 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Import", wireType)
+			}
+			var v int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				v |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			m.Import = bool(v != 0)
+		case 2:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Repository", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			if m.Repository == nil {
+				m.Repository = &RepositoryImportSpec{}
+			}
+			if err := m.Repository.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			iNdEx = postIndex
+		case 3:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Images", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.Images = append(m.Images, ImageImportSpec{})
+			if err := m.Images[len(m.Images)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			iNdEx = postIndex
+		default:
+			iNdEx = preIndex
+			skippy, err := skipGenerated(dAtA[iNdEx:])
+			if err != nil {
+				return err
+			}
+			if skippy < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if (iNdEx + skippy) < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if (iNdEx + skippy) > l {
+				return io.ErrUnexpectedEOF
+			}
+			iNdEx += skippy
+		}
+	}
+
+	if iNdEx > l {
+		return io.ErrUnexpectedEOF
+	}
+	return nil
+}
+func (m *ImageStreamImportStatus) Unmarshal(dAtA []byte) error {
+	l := len(dAtA)
+	iNdEx := 0
+	for iNdEx < l {
+		preIndex := iNdEx
+		var wire uint64
+		for shift := uint(0); ; shift += 7 {
+			if shift >= 64 {
+				return ErrIntOverflowGenerated
+			}
+			if iNdEx >= l {
+				return io.ErrUnexpectedEOF
+			}
+			b := dAtA[iNdEx]
+			iNdEx++
+			wire |= uint64(b&0x7F) << shift
+			if b < 0x80 {
+				break
+			}
+		}
+		fieldNum := int32(wire >> 3)
+		wireType := int(wire & 0x7)
+		if wireType == 4 {
+			return fmt.Errorf("proto: ImageStreamImportStatus: wiretype end group for non-group")
+		}
+		if fieldNum <= 0 {
+			return fmt.Errorf("proto: ImageStreamImportStatus: illegal tag %d (wire type %d)", fieldNum, wire)
+		}
+		switch fieldNum {
+		case 1:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Import", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			if m.Import == nil {
+				m.Import = &ImageStream{}
+			}
+			if err := m.Import.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			iNdEx = postIndex
+		case 2:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Repository", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			if m.Repository == nil {
+				m.Repository = &RepositoryImportStatus{}
+			}
+			if err := m.Repository.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			iNdEx = postIndex
+		case 3:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Images", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.Images = append(m.Images, ImageImportStatus{})
+			if err := m.Images[len(m.Images)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			iNdEx = postIndex
+		default:
+			iNdEx = preIndex
+			skippy, err := skipGenerated(dAtA[iNdEx:])
+			if err != nil {
+				return err
+			}
+			if skippy < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if (iNdEx + skippy) < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if (iNdEx + skippy) > l {
+				return io.ErrUnexpectedEOF
+			}
+			iNdEx += skippy
+		}
+	}
+
+	if iNdEx > l {
+		return io.ErrUnexpectedEOF
+	}
+	return nil
+}
+func (m *ImageStreamLayers) Unmarshal(dAtA []byte) error {
+	l := len(dAtA)
+	iNdEx := 0
+	for iNdEx < l {
+		preIndex := iNdEx
+		var wire uint64
+		for shift := uint(0); ; shift += 7 {
+			if shift >= 64 {
+				return ErrIntOverflowGenerated
+			}
+			if iNdEx >= l {
+				return io.ErrUnexpectedEOF
+			}
+			b := dAtA[iNdEx]
+			iNdEx++
+			wire |= uint64(b&0x7F) << shift
+			if b < 0x80 {
+				break
+			}
+		}
+		fieldNum := int32(wire >> 3)
+		wireType := int(wire & 0x7)
+		if wireType == 4 {
+			return fmt.Errorf("proto: ImageStreamLayers: wiretype end group for non-group")
+		}
+		if fieldNum <= 0 {
+			return fmt.Errorf("proto: ImageStreamLayers: illegal tag %d (wire type %d)", fieldNum, wire)
+		}
+		switch fieldNum {
+		case 1:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			if err := m.ObjectMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			iNdEx = postIndex
+		case 2:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Blobs", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			if m.Blobs == nil {
+				m.Blobs = make(map[string]ImageLayerData)
+			}
+			var mapkey string
+			mapvalue := &ImageLayerData{}
+			for iNdEx < postIndex {
+				entryPreIndex := iNdEx
+				var wire uint64
+				for shift := uint(0); ; shift += 7 {
+					if shift >= 64 {
+						return ErrIntOverflowGenerated
+					}
+					if iNdEx >= l {
+						return io.ErrUnexpectedEOF
+					}
+					b := dAtA[iNdEx]
+					iNdEx++
+					wire |= uint64(b&0x7F) << shift
+					if b < 0x80 {
+						break
+					}
+				}
+				fieldNum := int32(wire >> 3)
+				if fieldNum == 1 {
+					var stringLenmapkey uint64
+					for shift := uint(0); ; shift += 7 {
+						if shift >= 64 {
+							return ErrIntOverflowGenerated
+						}
+						if iNdEx >= l {
+							return io.ErrUnexpectedEOF
+						}
+						b := dAtA[iNdEx]
+						iNdEx++
+						stringLenmapkey |= uint64(b&0x7F) << shift
+						if b < 0x80 {
+							break
+						}
+					}
+					intStringLenmapkey := int(stringLenmapkey)
+					if intStringLenmapkey < 0 {
+						return ErrInvalidLengthGenerated
+					}
+					postStringIndexmapkey := iNdEx + intStringLenmapkey
+					if postStringIndexmapkey < 0 {
+						return ErrInvalidLengthGenerated
+					}
+					if postStringIndexmapkey > l {
+						return io.ErrUnexpectedEOF
+					}
+					mapkey = string(dAtA[iNdEx:postStringIndexmapkey])
+					iNdEx = postStringIndexmapkey
+				} else if fieldNum == 2 {
+					var mapmsglen int
+					for shift := uint(0); ; shift += 7 {
+						if shift >= 64 {
+							return ErrIntOverflowGenerated
+						}
+						if iNdEx >= l {
+							return io.ErrUnexpectedEOF
+						}
+						b := dAtA[iNdEx]
+						iNdEx++
+						mapmsglen |= int(b&0x7F) << shift
+						if b < 0x80 {
+							break
+						}
+					}
+					if mapmsglen < 0 {
+						return ErrInvalidLengthGenerated
+					}
+					postmsgIndex := iNdEx + mapmsglen
+					if postmsgIndex < 0 {
+						return ErrInvalidLengthGenerated
+					}
+					if postmsgIndex > l {
+						return io.ErrUnexpectedEOF
+					}
+					mapvalue = &ImageLayerData{}
+					if err := mapvalue.Unmarshal(dAtA[iNdEx:postmsgIndex]); err != nil {
+						return err
+					}
+					iNdEx = postmsgIndex
+				} else {
+					iNdEx = entryPreIndex
+					skippy, err := skipGenerated(dAtA[iNdEx:])
+					if err != nil {
+						return err
+					}
+					if skippy < 0 {
+						return ErrInvalidLengthGenerated
+					}
+					if (iNdEx + skippy) > postIndex {
+						return io.ErrUnexpectedEOF
+					}
+					iNdEx += skippy
+				}
+			}
+			m.Blobs[mapkey] = *mapvalue
+			iNdEx = postIndex
+		case 3:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Images", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			if m.Images == nil {
+				m.Images = make(map[string]ImageBlobReferences)
+			}
+			var mapkey string
+			mapvalue := &ImageBlobReferences{}
+			for iNdEx < postIndex {
+				entryPreIndex := iNdEx
+				var wire uint64
+				for shift := uint(0); ; shift += 7 {
+					if shift >= 64 {
+						return ErrIntOverflowGenerated
+					}
+					if iNdEx >= l {
+						return io.ErrUnexpectedEOF
+					}
+					b := dAtA[iNdEx]
+					iNdEx++
+					wire |= uint64(b&0x7F) << shift
+					if b < 0x80 {
+						break
+					}
+				}
+				fieldNum := int32(wire >> 3)
+				if fieldNum == 1 {
+					var stringLenmapkey uint64
+					for shift := uint(0); ; shift += 7 {
+						if shift >= 64 {
+							return ErrIntOverflowGenerated
+						}
+						if iNdEx >= l {
+							return io.ErrUnexpectedEOF
+						}
+						b := dAtA[iNdEx]
+						iNdEx++
+						stringLenmapkey |= uint64(b&0x7F) << shift
+						if b < 0x80 {
+							break
+						}
+					}
+					intStringLenmapkey := int(stringLenmapkey)
+					if intStringLenmapkey < 0 {
+						return ErrInvalidLengthGenerated
+					}
+					postStringIndexmapkey := iNdEx + intStringLenmapkey
+					if postStringIndexmapkey < 0 {
+						return ErrInvalidLengthGenerated
+					}
+					if postStringIndexmapkey > l {
+						return io.ErrUnexpectedEOF
+					}
+					mapkey = string(dAtA[iNdEx:postStringIndexmapkey])
+					iNdEx = postStringIndexmapkey
+				} else if fieldNum == 2 {
+					var mapmsglen int
+					for shift := uint(0); ; shift += 7 {
+						if shift >= 64 {
+							return ErrIntOverflowGenerated
+						}
+						if iNdEx >= l {
+							return io.ErrUnexpectedEOF
+						}
+						b := dAtA[iNdEx]
+						iNdEx++
+						mapmsglen |= int(b&0x7F) << shift
+						if b < 0x80 {
+							break
+						}
+					}
+					if mapmsglen < 0 {
+						return ErrInvalidLengthGenerated
+					}
+					postmsgIndex := iNdEx + mapmsglen
+					if postmsgIndex < 0 {
+						return ErrInvalidLengthGenerated
+					}
+					if postmsgIndex > l {
+						return io.ErrUnexpectedEOF
+					}
+					mapvalue = &ImageBlobReferences{}
+					if err := mapvalue.Unmarshal(dAtA[iNdEx:postmsgIndex]); err != nil {
+						return err
+					}
+					iNdEx = postmsgIndex
+				} else {
+					iNdEx = entryPreIndex
+					skippy, err := skipGenerated(dAtA[iNdEx:])
+					if err != nil {
+						return err
+					}
+					if skippy < 0 {
+						return ErrInvalidLengthGenerated
+					}
+					if (iNdEx + skippy) > postIndex {
+						return io.ErrUnexpectedEOF
+					}
+					iNdEx += skippy
+				}
+			}
+			m.Images[mapkey] = *mapvalue
+			iNdEx = postIndex
+		default:
+			iNdEx = preIndex
+			skippy, err := skipGenerated(dAtA[iNdEx:])
+			if err != nil {
+				return err
+			}
+			if skippy < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if (iNdEx + skippy) < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if (iNdEx + skippy) > l {
+				return io.ErrUnexpectedEOF
+			}
+			iNdEx += skippy
+		}
+	}
+
+	if iNdEx > l {
+		return io.ErrUnexpectedEOF
+	}
+	return nil
+}
+func (m *ImageStreamList) Unmarshal(dAtA []byte) error {
+	l := len(dAtA)
+	iNdEx := 0
+	for iNdEx < l {
+		preIndex := iNdEx
+		var wire uint64
+		for shift := uint(0); ; shift += 7 {
+			if shift >= 64 {
+				return ErrIntOverflowGenerated
+			}
+			if iNdEx >= l {
+				return io.ErrUnexpectedEOF
+			}
+			b := dAtA[iNdEx]
+			iNdEx++
+			wire |= uint64(b&0x7F) << shift
+			if b < 0x80 {
+				break
+			}
+		}
+		fieldNum := int32(wire >> 3)
+		wireType := int(wire & 0x7)
+		if wireType == 4 {
+			return fmt.Errorf("proto: ImageStreamList: wiretype end group for non-group")
+		}
+		if fieldNum <= 0 {
+			return fmt.Errorf("proto: ImageStreamList: illegal tag %d (wire type %d)", fieldNum, wire)
+		}
+		switch fieldNum {
+		case 1:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field ListMeta", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			if err := m.ListMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			iNdEx = postIndex
+		case 2:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.Items = append(m.Items, ImageStream{})
+			if err := m.Items[len(m.Items)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			iNdEx = postIndex
+		default:
+			iNdEx = preIndex
+			skippy, err := skipGenerated(dAtA[iNdEx:])
+			if err != nil {
+				return err
+			}
+			if skippy < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if (iNdEx + skippy) < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if (iNdEx + skippy) > l {
+				return io.ErrUnexpectedEOF
+			}
+			iNdEx += skippy
+		}
+	}
+
+	if iNdEx > l {
+		return io.ErrUnexpectedEOF
+	}
+	return nil
+}
+func (m *ImageStreamMapping) Unmarshal(dAtA []byte) error {
+	l := len(dAtA)
+	iNdEx := 0
+	for iNdEx < l {
+		preIndex := iNdEx
+		var wire uint64
+		for shift := uint(0); ; shift += 7 {
+			if shift >= 64 {
+				return ErrIntOverflowGenerated
+			}
+			if iNdEx >= l {
+				return io.ErrUnexpectedEOF
+			}
+			b := dAtA[iNdEx]
+			iNdEx++
+			wire |= uint64(b&0x7F) << shift
+			if b < 0x80 {
+				break
+			}
+		}
+		fieldNum := int32(wire >> 3)
+		wireType := int(wire & 0x7)
+		if wireType == 4 {
+			return fmt.Errorf("proto: ImageStreamMapping: wiretype end group for non-group")
+		}
+		if fieldNum <= 0 {
+			return fmt.Errorf("proto: ImageStreamMapping: illegal tag %d (wire type %d)", fieldNum, wire)
+		}
+		switch fieldNum {
+		case 1:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			if err := m.ObjectMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			iNdEx = postIndex
+		case 2:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Image", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			if err := m.Image.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			iNdEx = postIndex
+		case 3:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Tag", wireType)
+			}
+			var stringLen uint64
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				stringLen |= uint64(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			intStringLen := int(stringLen)
+			if intStringLen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + intStringLen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.Tag = string(dAtA[iNdEx:postIndex])
+			iNdEx = postIndex
+		default:
+			iNdEx = preIndex
+			skippy, err := skipGenerated(dAtA[iNdEx:])
+			if err != nil {
+				return err
+			}
+			if skippy < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if (iNdEx + skippy) < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if (iNdEx + skippy) > l {
+				return io.ErrUnexpectedEOF
+			}
+			iNdEx += skippy
+		}
+	}
+
+	if iNdEx > l {
+		return io.ErrUnexpectedEOF
+	}
+	return nil
+}
+func (m *ImageStreamSpec) Unmarshal(dAtA []byte) error {
+	l := len(dAtA)
+	iNdEx := 0
+	for iNdEx < l {
+		preIndex := iNdEx
+		var wire uint64
+		for shift := uint(0); ; shift += 7 {
+			if shift >= 64 {
+				return ErrIntOverflowGenerated
+			}
+			if iNdEx >= l {
+				return io.ErrUnexpectedEOF
+			}
+			b := dAtA[iNdEx]
+			iNdEx++
+			wire |= uint64(b&0x7F) << shift
+			if b < 0x80 {
+				break
+			}
+		}
+		fieldNum := int32(wire >> 3)
+		wireType := int(wire & 0x7)
+		if wireType == 4 {
+			return fmt.Errorf("proto: ImageStreamSpec: wiretype end group for non-group")
+		}
+		if fieldNum <= 0 {
+			return fmt.Errorf("proto: ImageStreamSpec: illegal tag %d (wire type %d)", fieldNum, wire)
+		}
+		switch fieldNum {
+		case 1:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field DockerImageRepository", wireType)
+			}
+			var stringLen uint64
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				stringLen |= uint64(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			intStringLen := int(stringLen)
+			if intStringLen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + intStringLen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.DockerImageRepository = string(dAtA[iNdEx:postIndex])
+			iNdEx = postIndex
+		case 2:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Tags", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.Tags = append(m.Tags, TagReference{})
+			if err := m.Tags[len(m.Tags)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			iNdEx = postIndex
+		case 3:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field LookupPolicy", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			if err := m.LookupPolicy.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			iNdEx = postIndex
+		default:
+			iNdEx = preIndex
+			skippy, err := skipGenerated(dAtA[iNdEx:])
+			if err != nil {
+				return err
+			}
+			if skippy < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if (iNdEx + skippy) < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if (iNdEx + skippy) > l {
+				return io.ErrUnexpectedEOF
+			}
+			iNdEx += skippy
+		}
+	}
+
+	if iNdEx > l {
+		return io.ErrUnexpectedEOF
+	}
+	return nil
+}
+func (m *ImageStreamStatus) Unmarshal(dAtA []byte) error {
+	l := len(dAtA)
+	iNdEx := 0
+	for iNdEx < l {
+		preIndex := iNdEx
+		var wire uint64
+		for shift := uint(0); ; shift += 7 {
+			if shift >= 64 {
+				return ErrIntOverflowGenerated
+			}
+			if iNdEx >= l {
+				return io.ErrUnexpectedEOF
+			}
+			b := dAtA[iNdEx]
+			iNdEx++
+			wire |= uint64(b&0x7F) << shift
+			if b < 0x80 {
+				break
+			}
+		}
+		fieldNum := int32(wire >> 3)
+		wireType := int(wire & 0x7)
+		if wireType == 4 {
+			return fmt.Errorf("proto: ImageStreamStatus: wiretype end group for non-group")
+		}
+		if fieldNum <= 0 {
+			return fmt.Errorf("proto: ImageStreamStatus: illegal tag %d (wire type %d)", fieldNum, wire)
+		}
+		switch fieldNum {
+		case 1:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field DockerImageRepository", wireType)
+			}
+			var stringLen uint64
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				stringLen |= uint64(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			intStringLen := int(stringLen)
+			if intStringLen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + intStringLen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.DockerImageRepository = string(dAtA[iNdEx:postIndex])
+			iNdEx = postIndex
+		case 2:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Tags", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.Tags = append(m.Tags, NamedTagEventList{})
+			if err := m.Tags[len(m.Tags)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			iNdEx = postIndex
+		case 3:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field PublicDockerImageRepository", wireType)
+			}
+			var stringLen uint64
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				stringLen |= uint64(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			intStringLen := int(stringLen)
+			if intStringLen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + intStringLen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.PublicDockerImageRepository = string(dAtA[iNdEx:postIndex])
+			iNdEx = postIndex
+		default:
+			iNdEx = preIndex
+			skippy, err := skipGenerated(dAtA[iNdEx:])
+			if err != nil {
+				return err
+			}
+			if skippy < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if (iNdEx + skippy) < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if (iNdEx + skippy) > l {
+				return io.ErrUnexpectedEOF
+			}
+			iNdEx += skippy
+		}
+	}
+
+	if iNdEx > l {
+		return io.ErrUnexpectedEOF
+	}
+	return nil
+}
+func (m *ImageStreamTag) Unmarshal(dAtA []byte) error {
+	l := len(dAtA)
+	iNdEx := 0
+	for iNdEx < l {
+		preIndex := iNdEx
+		var wire uint64
+		for shift := uint(0); ; shift += 7 {
+			if shift >= 64 {
+				return ErrIntOverflowGenerated
+			}
+			if iNdEx >= l {
+				return io.ErrUnexpectedEOF
+			}
+			b := dAtA[iNdEx]
+			iNdEx++
+			wire |= uint64(b&0x7F) << shift
+			if b < 0x80 {
+				break
+			}
+		}
+		fieldNum := int32(wire >> 3)
+		wireType := int(wire & 0x7)
+		if wireType == 4 {
+			return fmt.Errorf("proto: ImageStreamTag: wiretype end group for non-group")
+		}
+		if fieldNum <= 0 {
+			return fmt.Errorf("proto: ImageStreamTag: illegal tag %d (wire type %d)", fieldNum, wire)
+		}
+		switch fieldNum {
+		case 1:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			if err := m.ObjectMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			iNdEx = postIndex
+		case 2:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Tag", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			if m.Tag == nil {
+				m.Tag = &TagReference{}
+			}
+			if err := m.Tag.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			iNdEx = postIndex
+		case 3:
+			if wireType != 0 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Generation", wireType)
+			}
+			m.Generation = 0
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				m.Generation |= int64(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+		case 4:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Conditions", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.Conditions = append(m.Conditions, TagEventCondition{})
+			if err := m.Conditions[len(m.Conditions)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			iNdEx = postIndex
+		case 5:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Image", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			if err := m.Image.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			iNdEx = postIndex
+		case 6:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field LookupPolicy", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			if err := m.LookupPolicy.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			iNdEx = postIndex
+		default:
+			iNdEx = preIndex
+			skippy, err := skipGenerated(dAtA[iNdEx:])
+			if err != nil {
+				return err
+			}
+			if skippy < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if (iNdEx + skippy) < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if (iNdEx + skippy) > l {
+				return io.ErrUnexpectedEOF
+			}
+			iNdEx += skippy
+		}
+	}
+
+	if iNdEx > l {
+		return io.ErrUnexpectedEOF
+	}
+	return nil
+}
+func (m *ImageStreamTagList) Unmarshal(dAtA []byte) error {
+	l := len(dAtA)
+	iNdEx := 0
+	for iNdEx < l {
+		preIndex := iNdEx
+		var wire uint64
+		for shift := uint(0); ; shift += 7 {
+			if shift >= 64 {
+				return ErrIntOverflowGenerated
+			}
+			if iNdEx >= l {
+				return io.ErrUnexpectedEOF
+			}
+			b := dAtA[iNdEx]
+			iNdEx++
+			wire |= uint64(b&0x7F) << shift
+			if b < 0x80 {
+				break
+			}
+		}
+		fieldNum := int32(wire >> 3)
+		wireType := int(wire & 0x7)
+		if wireType == 4 {
+			return fmt.Errorf("proto: ImageStreamTagList: wiretype end group for non-group")
+		}
+		if fieldNum <= 0 {
+			return fmt.Errorf("proto: ImageStreamTagList: illegal tag %d (wire type %d)", fieldNum, wire)
+		}
+		switch fieldNum {
+		case 1:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field ListMeta", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			if err := m.ListMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			iNdEx = postIndex
+		case 2:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.Items = append(m.Items, ImageStreamTag{})
+			if err := m.Items[len(m.Items)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			iNdEx = postIndex
+		default:
+			iNdEx = preIndex
+			skippy, err := skipGenerated(dAtA[iNdEx:])
+			if err != nil {
+				return err
+			}
+			if skippy < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if (iNdEx + skippy) < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if (iNdEx + skippy) > l {
+				return io.ErrUnexpectedEOF
+			}
+			iNdEx += skippy
+		}
+	}
+
+	if iNdEx > l {
+		return io.ErrUnexpectedEOF
+	}
+	return nil
+}
+func (m *ImageTag) Unmarshal(dAtA []byte) error {
+	l := len(dAtA)
+	iNdEx := 0
+	for iNdEx < l {
+		preIndex := iNdEx
+		var wire uint64
+		for shift := uint(0); ; shift += 7 {
+			if shift >= 64 {
+				return ErrIntOverflowGenerated
+			}
+			if iNdEx >= l {
+				return io.ErrUnexpectedEOF
+			}
+			b := dAtA[iNdEx]
+			iNdEx++
+			wire |= uint64(b&0x7F) << shift
+			if b < 0x80 {
+				break
+			}
+		}
+		fieldNum := int32(wire >> 3)
+		wireType := int(wire & 0x7)
+		if wireType == 4 {
fmt.Errorf("proto: ImageTag: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ImageTag: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ObjectMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Spec", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Spec == nil { + m.Spec = &TagReference{} + } + if err := m.Spec.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Status", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Status == nil { + m.Status = &NamedTagEventList{} + } + if err := m.Status.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Image", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Image == nil { + m.Image = &Image{} + } + if err := m.Image.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ImageTagList) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + 
preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ImageTagList: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ImageTagList: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ListMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ListMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Items = append(m.Items, ImageTag{}) + if err := m.Items[len(m.Items)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *NamedTagEventList) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: NamedTagEventList: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: NamedTagEventList: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Tag", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex 
:= iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Tag = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Items = append(m.Items, TagEvent{}) + if err := m.Items[len(m.Items)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Conditions", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Conditions = append(m.Conditions, TagEventCondition{}) + if err := m.Conditions[len(m.Conditions)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *RepositoryImportSpec) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: RepositoryImportSpec: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: RepositoryImportSpec: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field From", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.From.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field 
ImportPolicy", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ImportPolicy.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field IncludeManifest", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.IncludeManifest = bool(v != 0) + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ReferencePolicy", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ReferencePolicy.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *RepositoryImportStatus) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: RepositoryImportStatus: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: RepositoryImportStatus: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Status", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Status.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for 
field Images", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Images = append(m.Images, ImageImportStatus{}) + if err := m.Images[len(m.Images)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field AdditionalTags", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.AdditionalTags = append(m.AdditionalTags, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *SignatureCondition) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: SignatureCondition: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: SignatureCondition: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Type", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Type = SignatureConditionType(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Status", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break 
+ } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Status = k8s_io_api_core_v1.ConditionStatus(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field LastProbeTime", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.LastProbeTime.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field LastTransitionTime", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.LastTransitionTime.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Reason", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Reason = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 6: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Message", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Message = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *SignatureGenericEntity) Unmarshal(dAtA 
[]byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: SignatureGenericEntity: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: SignatureGenericEntity: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Organization", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Organization = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field CommonName", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.CommonName = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *SignatureIssuer) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: SignatureIssuer: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: SignatureIssuer: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field SignatureGenericEntity", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return 
ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.SignatureGenericEntity.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *SignatureSubject) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: SignatureSubject: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: SignatureSubject: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field SignatureGenericEntity", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.SignatureGenericEntity.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field PublicKeyID", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.PublicKeyID = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *TagEvent) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + 
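+ // The loop above decoded the protobuf field key as a base-128 varint; its low three bits carry the wire type and the remaining bits the field number, unpacked next.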
fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: TagEvent: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: TagEvent: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Created", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Created.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field DockerImageReference", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.DockerImageReference = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Image", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Image = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 4: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Generation", wireType) + } + m.Generation = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Generation |= int64(b&0x7F) << shift + if b < 0x80 { + break + } + } + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *TagEventCondition) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire 
>> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: TagEventCondition: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: TagEventCondition: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Type", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Type = TagEventConditionType(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Status", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Status = k8s_io_api_core_v1.ConditionStatus(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field LastTransitionTime", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.LastTransitionTime.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Reason", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Reason = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Message", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return 
ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Message = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 6: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Generation", wireType) + } + m.Generation = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Generation |= int64(b&0x7F) << shift + if b < 0x80 { + break + } + } + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *TagImportPolicy) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: TagImportPolicy: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: TagImportPolicy: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Insecure", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.Insecure = bool(v != 0) + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Scheduled", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.Scheduled = bool(v != 0) + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *TagReference) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: TagReference: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: TagReference: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { 
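+ // The case numbers below match TagReference's field numbers in generated.proto; anything unrecognized falls through to the default case and is skipped.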
+ case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Name = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Annotations", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Annotations == nil { + m.Annotations = make(map[string]string) + } + var mapkey string + var mapvalue string + for iNdEx < postIndex { + entryPreIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + if fieldNum == 1 { + var stringLenmapkey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapkey |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapkey := int(stringLenmapkey) + if intStringLenmapkey < 0 { + return ErrInvalidLengthGenerated + } + postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey < 0 { + return ErrInvalidLengthGenerated + } + if postStringIndexmapkey > l { + return io.ErrUnexpectedEOF + } + mapkey = string(dAtA[iNdEx:postStringIndexmapkey]) + iNdEx = postStringIndexmapkey + } else if fieldNum == 2 { + var stringLenmapvalue uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapvalue |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapvalue := int(stringLenmapvalue) + if intStringLenmapvalue < 0 { + return ErrInvalidLengthGenerated + } + postStringIndexmapvalue := iNdEx + intStringLenmapvalue + if postStringIndexmapvalue < 0 { + return ErrInvalidLengthGenerated + } + if postStringIndexmapvalue > l { + return io.ErrUnexpectedEOF + } + mapvalue = string(dAtA[iNdEx:postStringIndexmapvalue]) + iNdEx = postStringIndexmapvalue + } else { + iNdEx = entryPreIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > postIndex { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + m.Annotations[mapkey] = mapvalue + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field From", 
wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.From == nil { + m.From = &v11.ObjectReference{} + } + if err := m.From.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 4: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Reference", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.Reference = bool(v != 0) + case 5: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Generation", wireType) + } + var v int64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int64(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.Generation = &v + case 6: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ImportPolicy", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ImportPolicy.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 7: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ReferencePolicy", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ReferencePolicy.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *TagReferencePolicy) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + 
break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: TagReferencePolicy: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: TagReferencePolicy: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Type", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Type = TagReferencePolicyType(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func skipGenerated(dAtA []byte) (n int, err error) { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowGenerated + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + wireType := int(wire & 0x7) + switch wireType { + case 0: + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowGenerated + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + iNdEx++ + if dAtA[iNdEx-1] < 0x80 { + break + } + } + return iNdEx, nil + case 1: + iNdEx += 8 + return iNdEx, nil + case 2: + var length int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowGenerated + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + length |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if length < 0 { + return 0, ErrInvalidLengthGenerated + } + iNdEx += length + if iNdEx < 0 { + return 0, ErrInvalidLengthGenerated + } + return iNdEx, nil + case 3: + for { + var innerWire uint64 + var start int = iNdEx + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowGenerated + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + innerWire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + innerWireType := int(innerWire & 0x7) + if innerWireType == 4 { + break + } + next, err := skipGenerated(dAtA[start:]) + if err != nil { + return 0, err + } + iNdEx = start + next + if iNdEx < 0 { + return 0, ErrInvalidLengthGenerated + } + } + return iNdEx, nil + case 4: + return iNdEx, nil + case 5: + iNdEx += 4 + return iNdEx, nil + default: + return 0, fmt.Errorf("proto: illegal wireType %d", wireType) + } + } + panic("unreachable") +} + +var ( + ErrInvalidLengthGenerated = fmt.Errorf("proto: negative length found during unmarshaling") + ErrIntOverflowGenerated = fmt.Errorf("proto: integer overflow") +) diff --git 
a/vendor/github.com/openshift/api/image/v1/generated.proto b/vendor/github.com/openshift/api/image/v1/generated.proto
new file mode 100644
index 0000000000000000000000000000000000000000..69ed48e89c9d925d05a5bd0c7bb7695d28df98d7
--- /dev/null
+++ b/vendor/github.com/openshift/api/image/v1/generated.proto
@@ -0,0 +1,626 @@
+
+// This file was autogenerated by go-to-protobuf. Do not edit it manually!
+
+syntax = 'proto2';
+
+package github.com.openshift.api.image.v1;
+
+import "k8s.io/api/core/v1/generated.proto";
+import "k8s.io/apimachinery/pkg/apis/meta/v1/generated.proto";
+import "k8s.io/apimachinery/pkg/runtime/generated.proto";
+import "k8s.io/apimachinery/pkg/runtime/schema/generated.proto";
+
+// Package-wide variables from generator "generated".
+option go_package = "v1";
+
+// DockerImageReference points to a container image.
+message DockerImageReference {
+  // Registry is the registry that contains the container image
+  optional string registry = 1;
+
+  // Namespace is the namespace that contains the container image
+  optional string namespace = 2;
+
+  // Name is the name of the container image
+  optional string name = 3;
+
+  // Tag is which tag of the container image is being referenced
+  optional string tag = 4;
+
+  // ID is the identifier for the container image
+  optional string iD = 5;
+}
+
+// Image is an immutable representation of a container image and metadata at a point in time.
+// Images are named by taking a hash of their contents (metadata and content) and any change
+// in format, content, or metadata results in a new name. The images resource is primarily
+// for use by cluster administrators and integrations like the cluster image registry - end
+// users instead access images via the imagestreamtags or imagestreamimages resources. While
+// image metadata is stored in the API, any integration that implements the container image
+// registry API must provide its own storage for the raw manifest data, image config, and
+// layer contents.
+message Image {
+  optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1;
+
+  // DockerImageReference is the string that can be used to pull this image.
+  optional string dockerImageReference = 2;
+
+  // DockerImageMetadata contains metadata about this image
+  // +patchStrategy=replace
+  // +kubebuilder:pruning:PreserveUnknownFields
+  optional k8s.io.apimachinery.pkg.runtime.RawExtension dockerImageMetadata = 3;
+
+  // DockerImageMetadataVersion conveys the version of the object, which if empty defaults to "1.0"
+  optional string dockerImageMetadataVersion = 4;
+
+  // DockerImageManifest is the raw JSON of the manifest
+  optional string dockerImageManifest = 5;
+
+  // DockerImageLayers represents the layers in the image. May not be set if the image does not define that data.
+  repeated ImageLayer dockerImageLayers = 6;
+
+  // Signatures holds all signatures of the image.
+  // +patchMergeKey=name
+  // +patchStrategy=merge
+  repeated ImageSignature signatures = 7;
+
+  // DockerImageSignatures provides the signatures as opaque blobs. This is a part of manifest schema v1.
+  repeated bytes dockerImageSignatures = 8;
+
+  // DockerImageManifestMediaType specifies the mediaType of manifest. This is a part of manifest schema v2.
+  optional string dockerImageManifestMediaType = 9;
+
+  // DockerImageConfig is a JSON blob that the runtime uses to set up the container. This is a part of manifest schema v2.
+  optional string dockerImageConfig = 10;
+}
+
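Aside: the DockerImageReference fields above compose into an ordinary pull spec. The sketch below is illustrative only - the helper name and layout are ours, not the library's - but it shows the intended composition, with the digest (iD) taking precedence over the tag.

package main

import "fmt"

// pullSpec shows how DockerImageReference's fields combine into a pull spec:
// [registry/][namespace/]name, then "@<id>" when a digest is known, else ":<tag>".
// Illustrative sketch only; not a helper shipped by openshift/api.
func pullSpec(registry, namespace, name, tag, id string) string {
	ref := name
	if namespace != "" {
		ref = namespace + "/" + ref
	}
	if registry != "" {
		ref = registry + "/" + ref
	}
	if id != "" {
		return ref + "@" + id
	}
	if tag != "" {
		return ref + ":" + tag
	}
	return ref
}

func main() {
	// Prints "registry.access.redhat.com/rhel7/rhel:7.2", the same pull-spec
	// form the ImageSignature comment below uses as an example.
	fmt.Println(pullSpec("registry.access.redhat.com", "rhel7", "rhel", "7.2", ""))
}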
+// ImageBlobReferences describes the blob references within an image.
+message ImageBlobReferences {
+  // imageMissing is true if the image is referenced by the image stream but the image
+  // object has been deleted from the API by an administrator. When this field is set,
+  // layers and config fields may be empty and callers that depend on the image metadata
+  // should consider the image to be unavailable for download or viewing.
+  // +optional
+  optional bool imageMissing = 3;
+
+  // layers is the list of blobs that compose this image, from base layer to top layer.
+  // All layers referenced by this array will be defined in the blobs map. Some images
+  // may have zero layers.
+  // +optional
+  repeated string layers = 1;
+
+  // config, if set, is the blob that contains the image config. Some images do
+  // not have separate config blobs and this field will be set to nil if so.
+  // +optional
+  optional string config = 2;
+}
+
+// ImageImportSpec describes a request to import a specific image.
+message ImageImportSpec {
+  // From is the source of an image to import; only kind DockerImage is allowed
+  optional k8s.io.api.core.v1.ObjectReference from = 1;
+
+  // To is a tag in the current image stream to assign the imported image to; if name is not specified, the default tag from from.name will be used
+  optional k8s.io.api.core.v1.LocalObjectReference to = 2;
+
+  // ImportPolicy is the policy controlling how the image is imported
+  optional TagImportPolicy importPolicy = 3;
+
+  // ReferencePolicy defines how other components should consume the image
+  optional TagReferencePolicy referencePolicy = 5;
+
+  // IncludeManifest determines if the manifest for each image is returned in the response
+  optional bool includeManifest = 4;
+}
+
+// ImageImportStatus describes the result of an image import.
+message ImageImportStatus {
+  // Status is the status of the image import, including errors encountered while retrieving the image
+  optional k8s.io.apimachinery.pkg.apis.meta.v1.Status status = 1;
+
+  // Image is the metadata of that image, if the image was located
+  optional Image image = 2;
+
+  // Tag is the tag this image was located under, if any
+  optional string tag = 3;
+}
+
+// ImageLayer represents a single layer of the image. Some images may have multiple layers. Some may have none.
+message ImageLayer {
+  // Name of the layer as defined by the underlying store.
+  optional string name = 1;
+
+  // Size of the layer in bytes as defined by the underlying store.
+  optional int64 size = 2;
+
+  // MediaType of the referenced object.
+  optional string mediaType = 3;
+}
+
+// ImageLayerData contains metadata about an image layer.
+message ImageLayerData {
+  // Size of the layer in bytes as defined by the underlying store. This field is
+  // optional if the necessary information about size is not available.
+  optional int64 size = 1;
+
+  // MediaType of the referenced object.
+  optional string mediaType = 2;
+}
+
+// ImageList is a list of Image objects.
+message ImageList {
+  optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1;
+
+  // Items is a list of images
+  repeated Image items = 2;
+}
+
+// ImageLookupPolicy describes how an image stream can be used to override the image references
+// used by pods, builds, and other resources in a namespace.
+message ImageLookupPolicy {
+  // local will change the docker short image references (like "mysql" or
+  // "php:latest") on objects in this namespace to the image ID whenever they match
+  // this image stream, instead of reaching out to a remote registry. The name will
+  // be fully qualified to an image ID if found. The tag's referencePolicy is taken
+  // into account on the replaced value. Only works within the current namespace.
+  optional bool local = 3;
+}
+
+// ImageSignature holds a signature of an image. It allows one to verify image identity and possibly other claims
+// as long as the signature is trusted. Based on this information it is possible to restrict runnable images
+// to those matching cluster-wide policy.
+// Mandatory fields should be parsed by clients doing image verification. The others are parsed from
+// the signature's content by the server. They serve an informative purpose only.
+message ImageSignature {
+  optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1;
+
+  // Required: Describes a type of stored blob.
+  optional string type = 2;
+
+  // Required: An opaque binary string which is an image's signature.
+  optional bytes content = 3;
+
+  // Conditions represent the latest available observations of a signature's current state.
+  // +patchMergeKey=type
+  // +patchStrategy=merge
+  repeated SignatureCondition conditions = 4;
+
+  // A human-readable string representing the image's identity. It could be a product name and version, or an
+  // image pull spec (e.g. "registry.access.redhat.com/rhel7/rhel:7.2").
+  optional string imageIdentity = 5;
+
+  // Contains claims from the signature.
+  map<string, string> signedClaims = 6;
+
+  // If specified, it is the time of signature's creation.
+  optional k8s.io.apimachinery.pkg.apis.meta.v1.Time created = 7;
+
+  // If specified, it holds information about an issuer of the signing certificate or key (a person or entity
+  // who signed the signing certificate or key).
+  optional SignatureIssuer issuedBy = 8;
+
+  // If specified, it holds information about a subject of the signing certificate or key (a person or entity
+  // who signed the image).
+  optional SignatureSubject issuedTo = 9;
+}
+
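The ImageStream message defined next notes that preserving a historical image means keeping a spec tag that points at it by digest. As a concrete illustration, here is a minimal sketch using the vendored imagev1 Go types added by this patch; the pinByDigest helper and its argument values are our own, not part of the API.

package main

import (
	imagev1 "github.com/openshift/api/image/v1"
	corev1 "k8s.io/api/core/v1"
)

// pinByDigest builds a spec tag whose from reference names an image by its
// content digest, in the "<STREAM>@<DIGEST>" ImageStreamImage form, so the
// image survives pruning even after it rotates out of status.tags history.
func pinByDigest(stream, digest, tag string) imagev1.TagReference {
	return imagev1.TagReference{
		Name: tag,
		From: &corev1.ObjectReference{
			Kind: "ImageStreamImage",
			Name: stream + "@" + digest,
		},
	}
}

func main() {
	// Hypothetical stream and digest, for illustration only.
	_ = pinByDigest("myapp", "sha256:4ff2...", "stable-v1")
}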
+message ImageStream { + optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; + + // Spec describes the desired state of this stream + // +optional + optional ImageStreamSpec spec = 2; + + // Status describes the current state of this stream + // +optional + optional ImageStreamStatus status = 3; +} + +// ImageStreamImage represents an Image that is retrieved by image name from an ImageStream. +// User interfaces and regular users can use this resource to access the metadata details of +// a tagged image in the image stream history for viewing, since Image resources are not +// directly accessible to end users. A not found error will be returned if no such image is +// referenced by a tag within the ImageStream. Images are created when spec tags are set on +// an image stream that represent an image in an external registry, when pushing to the +// integrated registry, or when tagging an existing image from one image stream to another. +// The name of an image stream image is in the form "<STREAM>@<DIGEST>", where the digest is +// the content addressable identifier for the image (sha256:xxxxx...). You can use +// ImageStreamImages as the from.kind of an image stream spec tag to reference an image +// exactly. The only operations supported on the imagestreamimage endpoint are retrieving +// the image. +message ImageStreamImage { + optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; + + // Image associated with the ImageStream and image name. + optional Image image = 2; +} + +// The image stream import resource provides an easy way for a user to find and import container images +// from other container image registries into the server. Individual images or an entire image repository may +// be imported, and users may choose to see the results of the import prior to tagging the resulting +// images into the specified image stream. +// +// This API is intended for end-user tools that need to see the metadata of the image prior to import +// (for instance, to generate an application from it). Clients that know the desired image can continue +// to create spec.tags directly into their image streams. +message ImageStreamImport { + optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; + + // Spec is a description of the images that the user wishes to import + optional ImageStreamImportSpec spec = 2; + + // Status is the result of importing the image + optional ImageStreamImportStatus status = 3; +} + +// ImageStreamImportSpec defines what images should be imported. +message ImageStreamImportSpec { + // Import indicates whether to perform an import - if so, the specified tags are set on the spec + // and status of the image stream defined by the type meta. + optional bool import = 1; + + // Repository is an optional import of an entire container image repository. A maximum limit on the + // number of tags imported this way is imposed by the server. + optional RepositoryImportSpec repository = 2; + + // Images are a list of individual images to import. + repeated ImageImportSpec images = 3; +} + +// ImageStreamImportStatus contains information about the status of an image stream import. +message ImageStreamImportStatus { + // Import is the image stream that was successfully updated or created when 'to' was set.
+ optional ImageStream import = 1; + + // Repository is set to the outcome of the import if spec.repository was set + optional RepositoryImportStatus repository = 2; + + // Images is set with the result of importing spec.images + repeated ImageImportStatus images = 3; +} + +// ImageStreamLayers describes information about the layers referenced by images in this +// image stream. +message ImageStreamLayers { + optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; + + // blobs is a map of blob name to metadata about the blob. + map<string, ImageLayerData> blobs = 2; + + // images is a map between an image name and the names of the blobs and config that + // comprise the image. + map<string, ImageBlobReferences> images = 3; +} + +// ImageStreamList is a list of ImageStream objects. +message ImageStreamList { + optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; + + // Items is a list of imageStreams + repeated ImageStream items = 2; +} + +// ImageStreamMapping represents a mapping from a single image stream tag to a container +// image as well as the reference to the container image stream the image came from. This +// resource is used by privileged integrators to create an image resource and to associate +// it with an image stream in the status tags field. Creating an ImageStreamMapping will +// allow any user who can view the image stream to tag or pull that image, so only create +// mappings where the user has proven they have access to the image contents directly. +// The only operation supported for this resource is create, and the metadata name and +// namespace should be set to the image stream containing the tag that should be updated. +message ImageStreamMapping { + optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; + + // Image is a container image. + optional Image image = 2; + + // Tag is a string value this image can be located with inside the stream. + optional string tag = 3; +} + +// ImageStreamSpec represents options for ImageStreams. +message ImageStreamSpec { + // lookupPolicy controls how other resources reference images within this namespace. + optional ImageLookupPolicy lookupPolicy = 3; + + // dockerImageRepository is optional; if specified, this stream is backed by a container repository on this server + // Deprecated: This field is deprecated as of v3.7 and will be removed in a future release. + // Specify the source for the tags to be imported in each tag via the spec.tags.from reference instead. + optional string dockerImageRepository = 1; + + // tags map arbitrary string values to specific image locators + // +patchMergeKey=name + // +patchStrategy=merge + repeated TagReference tags = 2; +} + +// ImageStreamStatus contains information about the state of this image stream. +message ImageStreamStatus { + // DockerImageRepository represents the effective location this stream may be accessed at. + // May be empty until the server determines where the repository is located + optional string dockerImageRepository = 1; + + // PublicDockerImageRepository represents the public location from where the image can + // be pulled outside the cluster. This field may be empty if the administrator + // has not exposed the integrated registry externally. + optional string publicDockerImageRepository = 3; + + // Tags are a historical record of images associated with each tag. The first entry in the + // TagEvent array is the currently tagged image.
+ // +patchMergeKey=tag + // +patchStrategy=merge + repeated NamedTagEventList tags = 2; +} + +// ImageStreamTag represents an Image that is retrieved by tag name from an ImageStream. +// Use this resource to interact with the tags and images in an image stream by tag, or +// to see the image details for a particular tag. The image associated with this resource +// is the most recently successfully tagged, imported, or pushed image (as described in the +// image stream status.tags.items list for this tag). If an import is in progress or has +// failed the previous image will be shown. Deleting an image stream tag clears both the +// status and spec fields of an image stream. If no image can be retrieved for a given tag, +// a not found error will be returned. +message ImageStreamTag { + optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; + + // tag is the spec tag associated with this image stream tag, and it may be null + // if only pushes have occurred to this image stream. + optional TagReference tag = 2; + + // generation is the current generation of the tagged image - if tag is provided + // and this value is not equal to the tag generation, a user has requested an + // import that has not completed, or conditions will be filled out indicating any + // error. + optional int64 generation = 3; + + // lookupPolicy indicates whether this tag will handle image references in this + // namespace. + optional ImageLookupPolicy lookupPolicy = 6; + + // conditions is an array of conditions that apply to the image stream tag. + repeated TagEventCondition conditions = 4; + + // image associated with the ImageStream and tag. + optional Image image = 5; +} + +// ImageStreamTagList is a list of ImageStreamTag objects. +message ImageStreamTagList { + optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; + + // Items is the list of image stream tags + repeated ImageStreamTag items = 2; +} + +// ImageTag represents a single tag within an image stream and includes the spec, +// the status history, and the currently referenced image (if any) of the provided +// tag. This type replaces the ImageStreamTag by providing a full view of the tag. +// ImageTags are returned for every spec or status tag present on the image stream. +// If no tag exists in either form a not found error will be returned by the API. +// A create operation will succeed if no spec tag has already been defined and the +// spec field is set. Delete will remove both spec and status elements from the +// image stream. +message ImageTag { + optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; + + // spec is the spec tag associated with this image stream tag, and it may be null + // if only pushes have occurred to this image stream. + optional TagReference spec = 2; + + // status is the status tag details associated with this image stream tag, and it + // may be null if no push or import has been performed. + optional NamedTagEventList status = 3; + + // image is the details of the most recent image stream status tag, and it may be + // null if import has not completed or an administrator has deleted the image + // object. To verify this is the most recent image, you must verify the generation + // of the most recent status.items entry matches the spec tag (if a spec tag is + // set). This field will not be set when listing image tags. + optional Image image = 4; +} + +// ImageTagList is a list of ImageTag objects. When listing image tags, the image +// field is not populated. 
Tags are returned in alphabetical order by image stream +// and then tag. +message ImageTagList { + optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; + + // Items is the list of image stream tags + repeated ImageTag items = 2; +} + +// NamedTagEventList relates a tag to its image history. +message NamedTagEventList { + // Tag is the tag for which the history is recorded + optional string tag = 1; + + // Items is the list of tag events for this tag; the first entry is the most recent. + repeated TagEvent items = 2; + + // Conditions is an array of conditions that apply to the tag event list. + repeated TagEventCondition conditions = 3; +} + +// RepositoryImportSpec describes a request to import images from a container image repository. +message RepositoryImportSpec { + // From is the source for the image repository to import; only kind DockerImage and a name of a container image repository is allowed + optional k8s.io.api.core.v1.ObjectReference from = 1; + + // ImportPolicy is the policy controlling how the image is imported + optional TagImportPolicy importPolicy = 2; + + // ReferencePolicy defines how other components should consume the image + optional TagReferencePolicy referencePolicy = 4; + + // IncludeManifest determines if the manifest for each image is returned in the response + optional bool includeManifest = 3; +} + +// RepositoryImportStatus describes the result of an image repository import +message RepositoryImportStatus { + // Status reflects whether any failure occurred during import + optional k8s.io.apimachinery.pkg.apis.meta.v1.Status status = 1; + + // Images is a list of images successfully retrieved by the import of the repository. + repeated ImageImportStatus images = 2; + + // AdditionalTags are tags that exist in the repository but were not imported because + // a maximum limit of automatic imports was applied. + repeated string additionalTags = 3; +} + +// SignatureCondition describes an image signature condition of particular kind at particular probe time. +message SignatureCondition { + // Type of signature condition, Complete or Failed. + optional string type = 1; + + // Status of the condition, one of True, False, Unknown. + optional string status = 2; + + // Last time the condition was checked. + optional k8s.io.apimachinery.pkg.apis.meta.v1.Time lastProbeTime = 3; + + // Last time the condition transitioned from one status to another. + optional k8s.io.apimachinery.pkg.apis.meta.v1.Time lastTransitionTime = 4; + + // (brief) reason for the condition's last transition. + optional string reason = 5; + + // Human readable message indicating details about the last transition. + optional string message = 6; +} + +// SignatureGenericEntity holds generic information about a person or entity who is an issuer or a subject +// of signing certificate or key. +message SignatureGenericEntity { + // Organization name. + optional string organization = 1; + + // Common name (e.g. openshift-signing-service). + optional string commonName = 2; +} + +// SignatureIssuer holds information about an issuer of signing certificate or key. +message SignatureIssuer { + optional SignatureGenericEntity signatureGenericEntity = 1; +} + +// SignatureSubject holds information about a person or entity who created the signature. +message SignatureSubject { + optional SignatureGenericEntity signatureGenericEntity = 1; + + // If present, it is a human readable key ID of the public key belonging to the subject, used to verify the image + // signature. It should contain at least the 64 lowest bits of the public key's fingerprint (e.g.
+ // 0x685ebe62bf278440). + optional string publicKeyID = 2; +} + +// TagEvent is used by ImageStreamStatus to keep a historical record of images associated with a tag. +message TagEvent { + // Created holds the time the TagEvent was created + optional k8s.io.apimachinery.pkg.apis.meta.v1.Time created = 1; + + // DockerImageReference is the string that can be used to pull this image + optional string dockerImageReference = 2; + + // Image is the image + optional string image = 3; + + // Generation is the spec tag generation that resulted in this tag being updated + optional int64 generation = 4; +} + +// TagEventCondition contains condition information for a tag event. +message TagEventCondition { + // Type of tag event condition, currently only ImportSuccess + optional string type = 1; + + // Status of the condition, one of True, False, Unknown. + optional string status = 2; + + // LastTransitionTime is the time the condition transitioned from one status to another. + optional k8s.io.apimachinery.pkg.apis.meta.v1.Time lastTransitionTime = 3; + + // Reason is a brief machine readable explanation for the condition's last transition. + optional string reason = 4; + + // Message is a human readable description of the details about the last transition, complementing reason. + optional string message = 5; + + // Generation is the spec tag generation that this status corresponds to + optional int64 generation = 6; +} + +// TagImportPolicy controls how images related to this tag will be imported. +message TagImportPolicy { + // Insecure is true if the server may bypass certificate verification or connect directly over HTTP during image import. + optional bool insecure = 1; + + // Scheduled indicates to the server that this tag should be periodically checked to ensure it is up to date, and imported + optional bool scheduled = 2; +} + +// TagReference specifies optional annotations for images using this tag and an optional reference to an ImageStreamTag, ImageStreamImage, or DockerImage this tag should track. +message TagReference { + // Name of the tag + optional string name = 1; + + // Optional; if specified, annotations that are applied to images retrieved via ImageStreamTags. + // +optional + map<string, string> annotations = 2; + + // Optional; if specified, a reference to another image that this tag should point to. Valid values + // are ImageStreamTag, ImageStreamImage, and DockerImage. ImageStreamTag references + // can only reference a tag within this same ImageStream. + optional k8s.io.api.core.v1.ObjectReference from = 3; + + // Reference states if the tag will be imported. Default value is false, which means the tag will + // be imported. + optional bool reference = 4; + + // Generation is a counter that tracks mutations to the spec tag (user intent). When a tag reference + // is changed the generation is set to match the current stream generation (which is incremented every + // time spec is changed). Other processes in the system like the image importer observe that the + // generation of spec tag is newer than the generation recorded in the status and use that as a trigger + // to import the newest remote tag. To trigger a new import, clients may set this value to zero which + // will reset the generation to the latest stream generation. Legacy clients will send this value as + // nil which will be merged with the current tag generation. + // +optional + optional int64 generation = 5; + + // ImportPolicy is information that controls how images may be imported by the server.
+ optional TagImportPolicy importPolicy = 6; + + // ReferencePolicy defines how other components should consume the image. + optional TagReferencePolicy referencePolicy = 7; +} + +// TagReferencePolicy describes how pull-specs for images in this image stream tag are generated when +// image change triggers in deployment configs or builds are resolved. This allows the image stream +// author to control how images are accessed. +message TagReferencePolicy { + // Type determines how the image pull spec should be transformed when the image stream tag is used in + // deployment config triggers or new builds. The default value is `Source`, indicating the original + // location of the image should be used (if imported). The user may also specify `Local`, indicating + // that the pull spec should point to the integrated container image registry and leverage the registry's + // ability to proxy the pull to an upstream registry. `Local` allows the credentials used to pull this + // image to be managed from the image stream's namespace, so others on the platform can access a remote + // image but have no access to the remote secret. It also allows the image layers to be mirrored into + // the local registry, from which the images can still be pulled even if the upstream registry is unavailable. + optional string type = 1; +} + diff --git a/vendor/github.com/openshift/api/image/v1/legacy.go b/vendor/github.com/openshift/api/image/v1/legacy.go new file mode 100644 index 0000000000000000000000000000000000000000..02bbaa2906fed84600619b96ce504f741cb118e2 --- /dev/null +++ b/vendor/github.com/openshift/api/image/v1/legacy.go @@ -0,0 +1,33 @@ +package v1 + +import ( + "github.com/openshift/api/image/docker10" + "github.com/openshift/api/image/dockerpre012" + corev1 "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" +) + +var ( + legacyGroupVersion = schema.GroupVersion{Group: "", Version: "v1"} + legacySchemeBuilder = runtime.NewSchemeBuilder(addLegacyKnownTypes, docker10.AddToSchemeInCoreGroup, dockerpre012.AddToSchemeInCoreGroup, corev1.AddToScheme) + DeprecatedInstallWithoutGroup = legacySchemeBuilder.AddToScheme +) + +// Adds the list of known types to api.Scheme. +func addLegacyKnownTypes(scheme *runtime.Scheme) error { + types := []runtime.Object{ + &Image{}, + &ImageList{}, + &ImageSignature{}, + &ImageStream{}, + &ImageStreamList{}, + &ImageStreamMapping{}, + &ImageStreamTag{}, + &ImageStreamTagList{}, + &ImageStreamImage{}, + &ImageStreamImport{}, + } + scheme.AddKnownTypes(legacyGroupVersion, types...)
+ return nil +} diff --git a/vendor/github.com/openshift/api/image/v1/register.go b/vendor/github.com/openshift/api/image/v1/register.go new file mode 100644 index 0000000000000000000000000000000000000000..fc0910c24686f227c1973c443f10559283f5f0ee --- /dev/null +++ b/vendor/github.com/openshift/api/image/v1/register.go @@ -0,0 +1,54 @@ +package v1 + +import ( + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" + + "github.com/openshift/api/image/docker10" + "github.com/openshift/api/image/dockerpre012" +) + +var ( + GroupName = "image.openshift.io" + GroupVersion = schema.GroupVersion{Group: GroupName, Version: "v1"} + schemeBuilder = runtime.NewSchemeBuilder(addKnownTypes, docker10.AddToScheme, dockerpre012.AddToScheme, corev1.AddToScheme) + // Install is a function which adds this version to a scheme + Install = schemeBuilder.AddToScheme + + // SchemeGroupVersion generated code relies on this name + // Deprecated + SchemeGroupVersion = GroupVersion + // AddToScheme exists solely to keep the old generators creating valid code + // DEPRECATED + AddToScheme = schemeBuilder.AddToScheme +) + +// Resource generated code relies on this being here, but it logically belongs to the group +// DEPRECATED +func Resource(resource string) schema.GroupResource { + return schema.GroupResource{Group: GroupName, Resource: resource} +} + +// Adds the list of known types to api.Scheme. +func addKnownTypes(scheme *runtime.Scheme) error { + scheme.AddKnownTypes(GroupVersion, + &Image{}, + &ImageList{}, + &ImageSignature{}, + &ImageStream{}, + &ImageStreamList{}, + &ImageStreamMapping{}, + &ImageStreamTag{}, + &ImageStreamTagList{}, + &ImageStreamImage{}, + &ImageStreamLayers{}, + &ImageStreamImport{}, + &ImageTag{}, + &ImageTagList{}, + &corev1.SecretList{}, + ) + metav1.AddToGroupVersion(scheme, GroupVersion) + return nil +} diff --git a/vendor/github.com/openshift/api/image/v1/types.go b/vendor/github.com/openshift/api/image/v1/types.go new file mode 100644 index 0000000000000000000000000000000000000000..c29cad9b25d2fb8111e2d3cf125088a22796f39b --- /dev/null +++ b/vendor/github.com/openshift/api/image/v1/types.go @@ -0,0 +1,634 @@ +package v1 + +import ( + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" +) + +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// ImageList is a list of Image objects. +type ImageList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` + + // Items is a list of images + Items []Image `json:"items" protobuf:"bytes,2,rep,name=items"` +} + +// +genclient +// +genclient:nonNamespaced +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// Image is an immutable representation of a container image and metadata at a point in time. +// Images are named by taking a hash of their contents (metadata and content) and any change +// in format, content, or metadata results in a new name. The images resource is primarily +// for use by cluster administrators and integrations like the cluster image registry - end +// users instead access images via the imagestreamtags or imagestreamimages resources. While +// image metadata is stored in the API, any integration that implements the container image +// registry API must provide its own storage for the raw manifest data, image config, and +// layer contents. 
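As a rough usage sketch for the register.go Install function above, not part of this diff, here is how a consumer typically wires these types into a scheme before building clients:

package main

import (
	"k8s.io/apimachinery/pkg/runtime"

	imagev1 "github.com/openshift/api/image/v1"
)

// main registers the image.openshift.io/v1 types into a fresh scheme; a real
// consumer would then hand the scheme to a client or codec factory.
func main() {
	scheme := runtime.NewScheme()
	if err := imagev1.Install(scheme); err != nil {
		panic(err)
	}
}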
+type Image struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` + + // DockerImageReference is the string that can be used to pull this image. + DockerImageReference string `json:"dockerImageReference,omitempty" protobuf:"bytes,2,opt,name=dockerImageReference"` + // DockerImageMetadata contains metadata about this image + // +patchStrategy=replace + // +kubebuilder:pruning:PreserveUnknownFields + DockerImageMetadata runtime.RawExtension `json:"dockerImageMetadata,omitempty" patchStrategy:"replace" protobuf:"bytes,3,opt,name=dockerImageMetadata"` + // DockerImageMetadataVersion conveys the version of the object, which if empty defaults to "1.0" + DockerImageMetadataVersion string `json:"dockerImageMetadataVersion,omitempty" protobuf:"bytes,4,opt,name=dockerImageMetadataVersion"` + // DockerImageManifest is the raw JSON of the manifest + DockerImageManifest string `json:"dockerImageManifest,omitempty" protobuf:"bytes,5,opt,name=dockerImageManifest"` + // DockerImageLayers represents the layers in the image. May not be set if the image does not define that data. + DockerImageLayers []ImageLayer `json:"dockerImageLayers" protobuf:"bytes,6,rep,name=dockerImageLayers"` + // Signatures holds all signatures of the image. + // +patchMergeKey=name + // +patchStrategy=merge + Signatures []ImageSignature `json:"signatures,omitempty" patchStrategy:"merge" patchMergeKey:"name" protobuf:"bytes,7,rep,name=signatures"` + // DockerImageSignatures provides the signatures as opaque blobs. This is a part of manifest schema v1. + DockerImageSignatures [][]byte `json:"dockerImageSignatures,omitempty" protobuf:"bytes,8,rep,name=dockerImageSignatures"` + // DockerImageManifestMediaType specifies the mediaType of manifest. This is a part of manifest schema v2. + DockerImageManifestMediaType string `json:"dockerImageManifestMediaType,omitempty" protobuf:"bytes,9,opt,name=dockerImageManifestMediaType"` + // DockerImageConfig is a JSON blob that the runtime uses to set up the container. This is a part of manifest schema v2. + DockerImageConfig string `json:"dockerImageConfig,omitempty" protobuf:"bytes,10,opt,name=dockerImageConfig"` +} + +// ImageLayer represents a single layer of the image. Some images may have multiple layers. Some may have none. +type ImageLayer struct { + // Name of the layer as defined by the underlying store. + Name string `json:"name" protobuf:"bytes,1,opt,name=name"` + // Size of the layer in bytes as defined by the underlying store. + LayerSize int64 `json:"size" protobuf:"varint,2,opt,name=size"` + // MediaType of the referenced object. + MediaType string `json:"mediaType" protobuf:"bytes,3,opt,name=mediaType"` +} + +// +genclient +// +genclient:onlyVerbs=create,delete +// +genclient:nonNamespaced +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// ImageSignature holds a signature of an image. It allows verification of image identity and possibly other claims +// as long as the signature is trusted. Based on this information it is possible to restrict runnable images +// to those matching cluster-wide policy. +// Mandatory fields should be parsed by clients doing image verification. The others are parsed from +// the signature's content by the server. They serve an informative purpose only. +type ImageSignature struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` + + // Required: Describes a type of stored blob.
+ Type string `json:"type" protobuf:"bytes,2,opt,name=type"` + // Required: An opaque binary string which is an image's signature. + Content []byte `json:"content" protobuf:"bytes,3,opt,name=content"` + // Conditions represent the latest available observations of a signature's current state. + // +patchMergeKey=type + // +patchStrategy=merge + Conditions []SignatureCondition `json:"conditions,omitempty" patchStrategy:"merge" patchMergeKey:"type" protobuf:"bytes,4,rep,name=conditions"` + + // The following metadata fields will be set by the server if the signature content is successfully parsed and + // the information is available. + + // A human readable string representing the image's identity. It could be a product name and version, or an + // image pull spec (e.g. "registry.access.redhat.com/rhel7/rhel:7.2"). + ImageIdentity string `json:"imageIdentity,omitempty" protobuf:"bytes,5,opt,name=imageIdentity"` + // Contains claims from the signature. + SignedClaims map[string]string `json:"signedClaims,omitempty" protobuf:"bytes,6,rep,name=signedClaims"` + // If specified, it is the time of signature's creation. + Created *metav1.Time `json:"created,omitempty" protobuf:"bytes,7,opt,name=created"` + // If specified, it holds information about an issuer of signing certificate or key (a person or entity + // who signed the signing certificate or key). + IssuedBy *SignatureIssuer `json:"issuedBy,omitempty" protobuf:"bytes,8,opt,name=issuedBy"` + // If specified, it holds information about a subject of signing certificate or key (a person or entity + // who signed the image). + IssuedTo *SignatureSubject `json:"issuedTo,omitempty" protobuf:"bytes,9,opt,name=issuedTo"` +} + +// SignatureConditionType is a type of image signature condition. +type SignatureConditionType string + +// SignatureCondition describes an image signature condition of particular kind at particular probe time. +type SignatureCondition struct { + // Type of signature condition, Complete or Failed. + Type SignatureConditionType `json:"type" protobuf:"bytes,1,opt,name=type,casttype=SignatureConditionType"` + // Status of the condition, one of True, False, Unknown. + Status corev1.ConditionStatus `json:"status" protobuf:"bytes,2,opt,name=status,casttype=k8s.io/api/core/v1.ConditionStatus"` + // Last time the condition was checked. + LastProbeTime metav1.Time `json:"lastProbeTime,omitempty" protobuf:"bytes,3,opt,name=lastProbeTime"` + // Last time the condition transitioned from one status to another. + LastTransitionTime metav1.Time `json:"lastTransitionTime,omitempty" protobuf:"bytes,4,opt,name=lastTransitionTime"` + // (brief) reason for the condition's last transition. + Reason string `json:"reason,omitempty" protobuf:"bytes,5,opt,name=reason"` + // Human readable message indicating details about the last transition. + Message string `json:"message,omitempty" protobuf:"bytes,6,opt,name=message"` +} + +// SignatureGenericEntity holds generic information about a person or entity who is an issuer or a subject +// of signing certificate or key. +type SignatureGenericEntity struct { + // Organization name. + Organization string `json:"organization,omitempty" protobuf:"bytes,1,opt,name=organization"` + // Common name (e.g. openshift-signing-service). + CommonName string `json:"commonName,omitempty" protobuf:"bytes,2,opt,name=commonName"` +} + +// SignatureIssuer holds information about an issuer of signing certificate or key.
+type SignatureIssuer struct { + SignatureGenericEntity `json:",inline" protobuf:"bytes,1,opt,name=signatureGenericEntity"` +} + +// SignatureSubject holds information about a person or entity who created the signature. +type SignatureSubject struct { + SignatureGenericEntity `json:",inline" protobuf:"bytes,1,opt,name=signatureGenericEntity"` + // If present, it is a human readable key ID of the public key belonging to the subject, used to verify the image + // signature. It should contain at least the 64 lowest bits of the public key's fingerprint (e.g. + // 0x685ebe62bf278440). + PublicKeyID string `json:"publicKeyID" protobuf:"bytes,2,opt,name=publicKeyID"` +} + +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// ImageStreamList is a list of ImageStream objects. +type ImageStreamList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` + + // Items is a list of imageStreams + Items []ImageStream `json:"items" protobuf:"bytes,2,rep,name=items"` +} + +// +genclient +// +genclient:method=Secrets,verb=get,subresource=secrets,result=k8s.io/api/core/v1.SecretList +// +genclient:method=Layers,verb=get,subresource=layers,result=github.com/openshift/api/image/v1.ImageStreamLayers +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// An ImageStream stores a mapping of tags to images, metadata overrides that are applied +// when images are tagged in a stream, and an optional reference to a container image +// repository on a registry. Users typically update the spec.tags field to point to external +// images which are imported from container registries using credentials in your namespace +// with the pull secret type, or to existing image stream tags and images which are +// immediately accessible for tagging or pulling. The history of images applied to a tag +// is visible in the status.tags field and any user who can view an image stream is allowed +// to tag that image into their own image streams. Access to pull images from the integrated +// registry is granted by having the "get imagestreams/layers" permission on a given image +// stream. Users may remove a tag by deleting the imagestreamtag resource, which causes both +// spec and status for that tag to be removed. Image stream history is retained until an +// administrator runs the prune operation, which removes references that are no longer in +// use. To preserve a historical image, ensure there is a tag in spec pointing to that image +// by its digest. +type ImageStream struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` + + // Spec describes the desired state of this stream + // +optional + Spec ImageStreamSpec `json:"spec" protobuf:"bytes,2,opt,name=spec"` + // Status describes the current state of this stream + // +optional + Status ImageStreamStatus `json:"status,omitempty" protobuf:"bytes,3,opt,name=status"` +} + +// ImageStreamSpec represents options for ImageStreams. +type ImageStreamSpec struct { + // lookupPolicy controls how other resources reference images within this namespace. + LookupPolicy ImageLookupPolicy `json:"lookupPolicy,omitempty" protobuf:"bytes,3,opt,name=lookupPolicy"` + // dockerImageRepository is optional; if specified, this stream is backed by a container repository on this server + // Deprecated: This field is deprecated as of v3.7 and will be removed in a future release.
+ // Specify the source for the tags to be imported in each tag via the spec.tags.from reference instead. + DockerImageRepository string `json:"dockerImageRepository,omitempty" protobuf:"bytes,1,opt,name=dockerImageRepository"` + // tags map arbitrary string values to specific image locators + // +patchMergeKey=name + // +patchStrategy=merge + Tags []TagReference `json:"tags,omitempty" patchStrategy:"merge" patchMergeKey:"name" protobuf:"bytes,2,rep,name=tags"` +} + +// ImageLookupPolicy describes how an image stream can be used to override the image references +// used by pods, builds, and other resources in a namespace. +type ImageLookupPolicy struct { + // local will change the docker short image references (like "mysql" or + // "php:latest") on objects in this namespace to the image ID whenever they match + // this image stream, instead of reaching out to a remote registry. The name will + // be fully qualified to an image ID if found. The tag's referencePolicy is taken + // into account on the replaced value. Only works within the current namespace. + Local bool `json:"local" protobuf:"varint,3,opt,name=local"` +} + +// TagReference specifies optional annotations for images using this tag and an optional reference to an ImageStreamTag, ImageStreamImage, or DockerImage this tag should track. +type TagReference struct { + // Name of the tag + Name string `json:"name" protobuf:"bytes,1,opt,name=name"` + // Optional; if specified, annotations that are applied to images retrieved via ImageStreamTags. + // +optional + Annotations map[string]string `json:"annotations" protobuf:"bytes,2,rep,name=annotations"` + // Optional; if specified, a reference to another image that this tag should point to. Valid values + // are ImageStreamTag, ImageStreamImage, and DockerImage. ImageStreamTag references + // can only reference a tag within this same ImageStream. + From *corev1.ObjectReference `json:"from,omitempty" protobuf:"bytes,3,opt,name=from"` + // Reference states if the tag will be imported. Default value is false, which means the tag will + // be imported. + Reference bool `json:"reference,omitempty" protobuf:"varint,4,opt,name=reference"` + // Generation is a counter that tracks mutations to the spec tag (user intent). When a tag reference + // is changed the generation is set to match the current stream generation (which is incremented every + // time spec is changed). Other processes in the system like the image importer observe that the + // generation of spec tag is newer than the generation recorded in the status and use that as a trigger + // to import the newest remote tag. To trigger a new import, clients may set this value to zero which + // will reset the generation to the latest stream generation. Legacy clients will send this value as + // nil which will be merged with the current tag generation. + // +optional + Generation *int64 `json:"generation" protobuf:"varint,5,opt,name=generation"` + // ImportPolicy is information that controls how images may be imported by the server. + ImportPolicy TagImportPolicy `json:"importPolicy,omitempty" protobuf:"bytes,6,opt,name=importPolicy"` + // ReferencePolicy defines how other components should consume the image. + ReferencePolicy TagReferencePolicy `json:"referencePolicy,omitempty" protobuf:"bytes,7,opt,name=referencePolicy"` +}
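The generation contract described above can be exercised directly: the sketch below, not part of this diff and with an invented helper name, zeroes a spec tag's generation so the server resets it to the latest stream generation and the importer re-imports the remote tag.

package main

import imagev1 "github.com/openshift/api/image/v1"

// requestReimport sets the named spec tag's generation to zero; per the
// TagReference documentation, the server replaces zero with the latest
// stream generation, which triggers a fresh import.
func requestReimport(stream *imagev1.ImageStream, tag string) {
	for i := range stream.Spec.Tags {
		if stream.Spec.Tags[i].Name == tag {
			zero := int64(0)
			stream.Spec.Tags[i].Generation = &zero
		}
	}
}

func main() {}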
+// TagImportPolicy controls how images related to this tag will be imported. +type TagImportPolicy struct { + // Insecure is true if the server may bypass certificate verification or connect directly over HTTP during image import. + Insecure bool `json:"insecure,omitempty" protobuf:"varint,1,opt,name=insecure"` + // Scheduled indicates to the server that this tag should be periodically checked to ensure it is up to date, and imported + Scheduled bool `json:"scheduled,omitempty" protobuf:"varint,2,opt,name=scheduled"` +} + +// TagReferencePolicyType describes how pull-specs for images in an image stream tag are generated when +// image change triggers are fired. +type TagReferencePolicyType string + +const ( + // SourceTagReferencePolicy indicates the image's original location should be used when the image stream tag + // is resolved into other resources (builds and deployment configurations). + SourceTagReferencePolicy TagReferencePolicyType = "Source" + // LocalTagReferencePolicy indicates the image should prefer to pull via the local integrated registry, + // falling back to the remote location if the integrated registry has not been configured. The reference will + // use the internal DNS name or registry service IP. + LocalTagReferencePolicy TagReferencePolicyType = "Local" +) + +// TagReferencePolicy describes how pull-specs for images in this image stream tag are generated when +// image change triggers in deployment configs or builds are resolved. This allows the image stream +// author to control how images are accessed. +type TagReferencePolicy struct { + // Type determines how the image pull spec should be transformed when the image stream tag is used in + // deployment config triggers or new builds. The default value is `Source`, indicating the original + // location of the image should be used (if imported). The user may also specify `Local`, indicating + // that the pull spec should point to the integrated container image registry and leverage the registry's + // ability to proxy the pull to an upstream registry. `Local` allows the credentials used to pull this + // image to be managed from the image stream's namespace, so others on the platform can access a remote + // image but have no access to the remote secret. It also allows the image layers to be mirrored into + // the local registry, from which the images can still be pulled even if the upstream registry is unavailable. + Type TagReferencePolicyType `json:"type" protobuf:"bytes,1,opt,name=type,casttype=TagReferencePolicyType"` +} + +// ImageStreamStatus contains information about the state of this image stream. +type ImageStreamStatus struct { + // DockerImageRepository represents the effective location this stream may be accessed at. + // May be empty until the server determines where the repository is located + DockerImageRepository string `json:"dockerImageRepository" protobuf:"bytes,1,opt,name=dockerImageRepository"` + // PublicDockerImageRepository represents the public location from where the image can + // be pulled outside the cluster. This field may be empty if the administrator + // has not exposed the integrated registry externally. + PublicDockerImageRepository string `json:"publicDockerImageRepository,omitempty" protobuf:"bytes,3,opt,name=publicDockerImageRepository"` + // Tags are a historical record of images associated with each tag. The first entry in the + // TagEvent array is the currently tagged image.
+ // +patchMergeKey=tag + // +patchStrategy=merge + Tags []NamedTagEventList `json:"tags,omitempty" patchStrategy:"merge" patchMergeKey:"tag" protobuf:"bytes,2,rep,name=tags"` +} + +// NamedTagEventList relates a tag to its image history. +type NamedTagEventList struct { + // Tag is the tag for which the history is recorded + Tag string `json:"tag" protobuf:"bytes,1,opt,name=tag"` + // Items is the list of tag events for this tag; the first entry is the most recent. + Items []TagEvent `json:"items" protobuf:"bytes,2,rep,name=items"` + // Conditions is an array of conditions that apply to the tag event list. + Conditions []TagEventCondition `json:"conditions,omitempty" protobuf:"bytes,3,rep,name=conditions"` +} + +// TagEvent is used by ImageStreamStatus to keep a historical record of images associated with a tag. +type TagEvent struct { + // Created holds the time the TagEvent was created + Created metav1.Time `json:"created" protobuf:"bytes,1,opt,name=created"` + // DockerImageReference is the string that can be used to pull this image + DockerImageReference string `json:"dockerImageReference" protobuf:"bytes,2,opt,name=dockerImageReference"` + // Image is the image + Image string `json:"image" protobuf:"bytes,3,opt,name=image"` + // Generation is the spec tag generation that resulted in this tag being updated + Generation int64 `json:"generation" protobuf:"varint,4,opt,name=generation"` +} + +// TagEventConditionType is the type of a TagEventCondition. +type TagEventConditionType string + +// These are valid conditions of TagEvents. +const ( + // ImportSuccess with status False means the import of the specific tag failed + ImportSuccess TagEventConditionType = "ImportSuccess" +) + +// TagEventCondition contains condition information for a tag event. +type TagEventCondition struct { + // Type of tag event condition, currently only ImportSuccess + Type TagEventConditionType `json:"type" protobuf:"bytes,1,opt,name=type,casttype=TagEventConditionType"` + // Status of the condition, one of True, False, Unknown. + Status corev1.ConditionStatus `json:"status" protobuf:"bytes,2,opt,name=status,casttype=k8s.io/api/core/v1.ConditionStatus"` + // LastTransitionTime is the time the condition transitioned from one status to another. + LastTransitionTime metav1.Time `json:"lastTransitionTime,omitempty" protobuf:"bytes,3,opt,name=lastTransitionTime"` + // Reason is a brief machine readable explanation for the condition's last transition. + Reason string `json:"reason,omitempty" protobuf:"bytes,4,opt,name=reason"` + // Message is a human readable description of the details about the last transition, complementing reason. + Message string `json:"message,omitempty" protobuf:"bytes,5,opt,name=message"` + // Generation is the spec tag generation that this status corresponds to + Generation int64 `json:"generation" protobuf:"varint,6,opt,name=generation"` +} + +// +genclient +// +genclient:skipVerbs=get,list,create,update,patch,delete,deleteCollection,watch +// +genclient:method=Create,verb=create,result=k8s.io/apimachinery/pkg/apis/meta/v1.Status +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// ImageStreamMapping represents a mapping from a single image stream tag to a container +// image as well as the reference to the container image stream the image came from. This +// resource is used by privileged integrators to create an image resource and to associate +// it with an image stream in the status tags field.
Creating an ImageStreamMapping will +// allow any user who can view the image stream to tag or pull that image, so only create +// mappings where the user has proven they have access to the image contents directly. +// The only operation supported for this resource is create and the metadata name and +// namespace should be set to the image stream containing the tag that should be updated. +type ImageStreamMapping struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` + + // Image is a container image. + Image Image `json:"image" protobuf:"bytes,2,opt,name=image"` + // Tag is a string value this image can be located with inside the stream. + Tag string `json:"tag" protobuf:"bytes,3,opt,name=tag"` +} + +// +genclient +// +genclient:onlyVerbs=get,list,create,update,delete +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// ImageStreamTag represents an Image that is retrieved by tag name from an ImageStream. +// Use this resource to interact with the tags and images in an image stream by tag, or +// to see the image details for a particular tag. The image associated with this resource +// is the most recently successfully tagged, imported, or pushed image (as described in the +// image stream status.tags.items list for this tag). If an import is in progress or has +// failed the previous image will be shown. Deleting an image stream tag clears both the +// status and spec fields of an image stream. If no image can be retrieved for a given tag, +// a not found error will be returned. +type ImageStreamTag struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` + + // tag is the spec tag associated with this image stream tag, and it may be null + // if only pushes have occurred to this image stream. + Tag *TagReference `json:"tag" protobuf:"bytes,2,opt,name=tag"` + + // generation is the current generation of the tagged image - if tag is provided + // and this value is not equal to the tag generation, a user has requested an + // import that has not completed, or conditions will be filled out indicating any + // error. + Generation int64 `json:"generation" protobuf:"varint,3,opt,name=generation"` + + // lookupPolicy indicates whether this tag will handle image references in this + // namespace. + LookupPolicy ImageLookupPolicy `json:"lookupPolicy" protobuf:"varint,6,opt,name=lookupPolicy"` + + // conditions is an array of conditions that apply to the image stream tag. + Conditions []TagEventCondition `json:"conditions,omitempty" protobuf:"bytes,4,rep,name=conditions"` + + // image associated with the ImageStream and tag. + Image Image `json:"image" protobuf:"bytes,5,opt,name=image"` +} + +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// ImageStreamTagList is a list of ImageStreamTag objects. +type ImageStreamTagList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` + + // Items is the list of image stream tags + Items []ImageStreamTag `json:"items" protobuf:"bytes,2,rep,name=items"` +} + +// +genclient +// +genclient:onlyVerbs=get,list,create,update,delete +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// ImageTag represents a single tag within an image stream and includes the spec, +// the status history, and the currently referenced image (if any) of the provided +// tag. 
This type replaces the ImageStreamTag by providing a full view of the tag. +// ImageTags are returned for every spec or status tag present on the image stream. +// If no tag exists in either form a not found error will be returned by the API. +// A create operation will succeed if no spec tag has already been defined and the +// spec field is set. Delete will remove both spec and status elements from the +// image stream. +type ImageTag struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` + + // spec is the spec tag associated with this image stream tag, and it may be null + // if only pushes have occurred to this image stream. + Spec *TagReference `json:"spec" protobuf:"bytes,2,opt,name=spec"` + // status is the status tag details associated with this image stream tag, and it + // may be null if no push or import has been performed. + Status *NamedTagEventList `json:"status" protobuf:"bytes,3,opt,name=status"` + // image is the details of the most recent image stream status tag, and it may be + // null if import has not completed or an administrator has deleted the image + // object. To verify this is the most recent image, you must verify the generation + // of the most recent status.items entry matches the spec tag (if a spec tag is + // set). This field will not be set when listing image tags. + Image *Image `json:"image" protobuf:"bytes,4,opt,name=image"` +} + +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// ImageTagList is a list of ImageTag objects. When listing image tags, the image +// field is not populated. Tags are returned in alphabetical order by image stream +// and then tag. +type ImageTagList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` + + // Items is the list of image stream tags + Items []ImageTag `json:"items" protobuf:"bytes,2,rep,name=items"` +} + +// +genclient +// +genclient:onlyVerbs=get +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// ImageStreamImage represents an Image that is retrieved by image name from an ImageStream. +// User interfaces and regular users can use this resource to access the metadata details of +// a tagged image in the image stream history for viewing, since Image resources are not +// directly accessible to end users. A not found error will be returned if no such image is +// referenced by a tag within the ImageStream. Images are created when spec tags are set on +// an image stream that represent an image in an external registry, when pushing to the +// integrated registry, or when tagging an existing image from one image stream to another. +// The name of an image stream image is in the form "<STREAM>@<DIGEST>", where the digest is +// the content addressable identifier for the image (sha256:xxxxx...). You can use +// ImageStreamImages as the from.kind of an image stream spec tag to reference an image +// exactly. The only operations supported on the imagestreamimage endpoint are retrieving +// the image. +type ImageStreamImage struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` + + // Image associated with the ImageStream and image name. + Image Image `json:"image" protobuf:"bytes,2,opt,name=image"` +} + +// DockerImageReference points to a container image.
+type DockerImageReference struct { + // Registry is the registry that contains the container image + Registry string `protobuf:"bytes,1,opt,name=registry"` + // Namespace is the namespace that contains the container image + Namespace string `protobuf:"bytes,2,opt,name=namespace"` + // Name is the name of the container image + Name string `protobuf:"bytes,3,opt,name=name"` + // Tag is which tag of the container image is being referenced + Tag string `protobuf:"bytes,4,opt,name=tag"` + // ID is the identifier for the container image + ID string `protobuf:"bytes,5,opt,name=iD"` +} + +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// ImageStreamLayers describes information about the layers referenced by images in this +// image stream. +type ImageStreamLayers struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` + + // blobs is a map of blob name to metadata about the blob. + Blobs map[string]ImageLayerData `json:"blobs" protobuf:"bytes,2,rep,name=blobs"` + // images is a map between an image name and the names of the blobs and config that + // comprise the image. + Images map[string]ImageBlobReferences `json:"images" protobuf:"bytes,3,rep,name=images"` +} + +// ImageBlobReferences describes the blob references within an image. +type ImageBlobReferences struct { + // imageMissing is true if the image is referenced by the image stream but the image + // object has been deleted from the API by an administrator. When this field is set, + // layers and config fields may be empty and callers that depend on the image metadata + // should consider the image to be unavailable for download or viewing. + // +optional + ImageMissing bool `json:"imageMissing" protobuf:"varint,3,opt,name=imageMissing"` + // layers is the list of blobs that compose this image, from base layer to top layer. + // All layers referenced by this array will be defined in the blobs map. Some images + // may have zero layers. + // +optional + Layers []string `json:"layers" protobuf:"bytes,1,rep,name=layers"` + // config, if set, is the blob that contains the image config. Some images do + // not have separate config blobs and this field will be set to nil if so. + // +optional + Config *string `json:"config" protobuf:"bytes,2,opt,name=config"` +} + +// ImageLayerData contains metadata about an image layer. +type ImageLayerData struct { + // Size of the layer in bytes as defined by the underlying store. This field is + // optional if the necessary information about size is not available. + LayerSize *int64 `json:"size" protobuf:"varint,1,opt,name=size"` + // MediaType of the referenced object. + MediaType string `json:"mediaType" protobuf:"bytes,2,opt,name=mediaType"` +} + +// +genclient +// +genclient:onlyVerbs=create +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// The image stream import resource provides an easy way for a user to find and import container images +// from other container image registries into the server. Individual images or an entire image repository may +// be imported, and users may choose to see the results of the import prior to tagging the resulting +// images into the specified image stream. +// +// This API is intended for end-user tools that need to see the metadata of the image prior to import +// (for instance, to generate an application from it). Clients that know the desired image can continue +// to create spec.tags directly into their image streams. 
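For the import API just described, a minimal request sketch, not part of this diff and with invented object and image names, might look like:

package main

import (
	corev1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"

	imagev1 "github.com/openshift/api/image/v1"
)

// newImport builds an ImageStreamImport that asks the server to import one
// external image and, because Import is true, tag it into the "myapp"
// stream as "latest" rather than only previewing the metadata.
func newImport() *imagev1.ImageStreamImport {
	return &imagev1.ImageStreamImport{
		ObjectMeta: metav1.ObjectMeta{Name: "myapp", Namespace: "demo"},
		Spec: imagev1.ImageStreamImportSpec{
			Import: true,
			Images: []imagev1.ImageImportSpec{{
				From: corev1.ObjectReference{Kind: "DockerImage", Name: "docker.io/library/mysql:8.0"},
				To:   &corev1.LocalObjectReference{Name: "latest"},
			}},
		},
	}
}

func main() { _ = newImport() }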
+type ImageStreamImport struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` + + // Spec is a description of the images that the user wishes to import + Spec ImageStreamImportSpec `json:"spec" protobuf:"bytes,2,opt,name=spec"` + // Status is the result of importing the image + Status ImageStreamImportStatus `json:"status" protobuf:"bytes,3,opt,name=status"` +} + +// ImageStreamImportSpec defines what images should be imported. +type ImageStreamImportSpec struct { + // Import indicates whether to perform an import - if so, the specified tags are set on the spec + // and status of the image stream defined by the type meta. + Import bool `json:"import" protobuf:"varint,1,opt,name=import"` + // Repository is an optional import of an entire container image repository. A maximum limit on the + // number of tags imported this way is imposed by the server. + Repository *RepositoryImportSpec `json:"repository,omitempty" protobuf:"bytes,2,opt,name=repository"` + // Images are a list of individual images to import. + Images []ImageImportSpec `json:"images,omitempty" protobuf:"bytes,3,rep,name=images"` +} + +// ImageStreamImportStatus contains information about the status of an image stream import. +type ImageStreamImportStatus struct { + // Import is the image stream that was successfully updated or created when 'to' was set. + Import *ImageStream `json:"import,omitempty" protobuf:"bytes,1,opt,name=import"` + // Repository is set to the outcome of the import if spec.repository was set + Repository *RepositoryImportStatus `json:"repository,omitempty" protobuf:"bytes,2,opt,name=repository"` + // Images is set with the result of importing spec.images + Images []ImageImportStatus `json:"images,omitempty" protobuf:"bytes,3,rep,name=images"` +} + +// RepositoryImportSpec describes a request to import images from a container image repository. +type RepositoryImportSpec struct { + // From is the source for the image repository to import; only kind DockerImage and a name of a container image repository is allowed + From corev1.ObjectReference `json:"from" protobuf:"bytes,1,opt,name=from"` + + // ImportPolicy is the policy controlling how the image is imported + ImportPolicy TagImportPolicy `json:"importPolicy,omitempty" protobuf:"bytes,2,opt,name=importPolicy"` + // ReferencePolicy defines how other components should consume the image + ReferencePolicy TagReferencePolicy `json:"referencePolicy,omitempty" protobuf:"bytes,4,opt,name=referencePolicy"` + // IncludeManifest determines if the manifest for each image is returned in the response + IncludeManifest bool `json:"includeManifest,omitempty" protobuf:"varint,3,opt,name=includeManifest"` +} + +// RepositoryImportStatus describes the result of an image repository import +type RepositoryImportStatus struct { + // Status reflects whether any failure occurred during import + Status metav1.Status `json:"status,omitempty" protobuf:"bytes,1,opt,name=status"` + // Images is a list of images successfully retrieved by the import of the repository. + Images []ImageImportStatus `json:"images,omitempty" protobuf:"bytes,2,rep,name=images"` + // AdditionalTags are tags that exist in the repository but were not imported because + // a maximum limit of automatic imports was applied. + AdditionalTags []string `json:"additionalTags,omitempty" protobuf:"bytes,3,rep,name=additionalTags"` +} + +// ImageImportSpec describes a request to import a specific image.
+type ImageImportSpec struct { + // From is the source of an image to import; only kind DockerImage is allowed + From corev1.ObjectReference `json:"from" protobuf:"bytes,1,opt,name=from"` + // To is a tag in the current image stream to assign the imported image to; if name is not specified, the default tag from from.name will be used + To *corev1.LocalObjectReference `json:"to,omitempty" protobuf:"bytes,2,opt,name=to"` + + // ImportPolicy is the policy controlling how the image is imported + ImportPolicy TagImportPolicy `json:"importPolicy,omitempty" protobuf:"bytes,3,opt,name=importPolicy"` + // ReferencePolicy defines how other components should consume the image + ReferencePolicy TagReferencePolicy `json:"referencePolicy,omitempty" protobuf:"bytes,5,opt,name=referencePolicy"` + // IncludeManifest determines if the manifest for each image is returned in the response + IncludeManifest bool `json:"includeManifest,omitempty" protobuf:"varint,4,opt,name=includeManifest"` +} + +// ImageImportStatus describes the result of an image import. +type ImageImportStatus struct { + // Status is the status of the image import, including errors encountered while retrieving the image + Status metav1.Status `json:"status" protobuf:"bytes,1,opt,name=status"` + // Image is the metadata of that image, if the image was located + Image *Image `json:"image,omitempty" protobuf:"bytes,2,opt,name=image"` + // Tag is the tag this image was located under, if any + Tag string `json:"tag,omitempty" protobuf:"bytes,3,opt,name=tag"` +} diff --git a/vendor/github.com/openshift/api/image/v1/zz_generated.deepcopy.go b/vendor/github.com/openshift/api/image/v1/zz_generated.deepcopy.go new file mode 100644 index 0000000000000000000000000000000000000000..aab4664823ce1e01f077bf40438a26d54f0efc70 --- /dev/null +++ b/vendor/github.com/openshift/api/image/v1/zz_generated.deepcopy.go @@ -0,0 +1,978 @@ +// +build !ignore_autogenerated + +// Code generated by deepcopy-gen. DO NOT EDIT. + +package v1 + +import ( + corev1 "k8s.io/api/core/v1" + runtime "k8s.io/apimachinery/pkg/runtime" +) + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DockerImageReference) DeepCopyInto(out *DockerImageReference) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DockerImageReference. +func (in *DockerImageReference) DeepCopy() *DockerImageReference { + if in == nil { + return nil + } + out := new(DockerImageReference) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
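For orientation, a sketch of how a client might construct an ImageStreamImport using the types above (not part of the vendored diff; the object names and the image reference are hypothetical):

package main

import (
	"fmt"

	corev1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"

	imagev1 "github.com/openshift/api/image/v1"
)

func main() {
	isi := &imagev1.ImageStreamImport{
		ObjectMeta: metav1.ObjectMeta{Name: "mysql", Namespace: "demo"},
		Spec: imagev1.ImageStreamImportSpec{
			// Import=true asks the server to persist the resulting tags on the
			// image stream; false only previews the remote metadata in the response.
			Import: true,
			Images: []imagev1.ImageImportSpec{{
				From:            corev1.ObjectReference{Kind: "DockerImage", Name: "docker.io/library/mysql:8.0"},
				To:              &corev1.LocalObjectReference{Name: "8.0"},
				ImportPolicy:    imagev1.TagImportPolicy{Scheduled: true},
				ReferencePolicy: imagev1.TagReferencePolicy{Type: "Source"},
				IncludeManifest: true,
			}},
		},
	}
	fmt.Printf("would create ImageStreamImport %s/%s\n", isi.Namespace, isi.Name)
}

Since this resource is marked +genclient:onlyVerbs=create, a client would POST it once and read the import outcome back from the returned Status field.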
+func (in *Image) DeepCopyInto(out *Image) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.DockerImageMetadata.DeepCopyInto(&out.DockerImageMetadata) + if in.DockerImageLayers != nil { + in, out := &in.DockerImageLayers, &out.DockerImageLayers + *out = make([]ImageLayer, len(*in)) + copy(*out, *in) + } + if in.Signatures != nil { + in, out := &in.Signatures, &out.Signatures + *out = make([]ImageSignature, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.DockerImageSignatures != nil { + in, out := &in.DockerImageSignatures, &out.DockerImageSignatures + *out = make([][]byte, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = make([]byte, len(*in)) + copy(*out, *in) + } + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Image. +func (in *Image) DeepCopy() *Image { + if in == nil { + return nil + } + out := new(Image) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *Image) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ImageBlobReferences) DeepCopyInto(out *ImageBlobReferences) { + *out = *in + if in.Layers != nil { + in, out := &in.Layers, &out.Layers + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.Config != nil { + in, out := &in.Config, &out.Config + *out = new(string) + **out = **in + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ImageBlobReferences. +func (in *ImageBlobReferences) DeepCopy() *ImageBlobReferences { + if in == nil { + return nil + } + out := new(ImageBlobReferences) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ImageImportSpec) DeepCopyInto(out *ImageImportSpec) { + *out = *in + out.From = in.From + if in.To != nil { + in, out := &in.To, &out.To + *out = new(corev1.LocalObjectReference) + **out = **in + } + out.ImportPolicy = in.ImportPolicy + out.ReferencePolicy = in.ReferencePolicy + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ImageImportSpec. +func (in *ImageImportSpec) DeepCopy() *ImageImportSpec { + if in == nil { + return nil + } + out := new(ImageImportSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ImageImportStatus) DeepCopyInto(out *ImageImportStatus) { + *out = *in + in.Status.DeepCopyInto(&out.Status) + if in.Image != nil { + in, out := &in.Image, &out.Image + *out = new(Image) + (*in).DeepCopyInto(*out) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ImageImportStatus. +func (in *ImageImportStatus) DeepCopy() *ImageImportStatus { + if in == nil { + return nil + } + out := new(ImageImportStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ImageLayer) DeepCopyInto(out *ImageLayer) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ImageLayer. +func (in *ImageLayer) DeepCopy() *ImageLayer { + if in == nil { + return nil + } + out := new(ImageLayer) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ImageLayerData) DeepCopyInto(out *ImageLayerData) { + *out = *in + if in.LayerSize != nil { + in, out := &in.LayerSize, &out.LayerSize + *out = new(int64) + **out = **in + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ImageLayerData. +func (in *ImageLayerData) DeepCopy() *ImageLayerData { + if in == nil { + return nil + } + out := new(ImageLayerData) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ImageList) DeepCopyInto(out *ImageList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]Image, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ImageList. +func (in *ImageList) DeepCopy() *ImageList { + if in == nil { + return nil + } + out := new(ImageList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *ImageList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ImageLookupPolicy) DeepCopyInto(out *ImageLookupPolicy) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ImageLookupPolicy. +func (in *ImageLookupPolicy) DeepCopy() *ImageLookupPolicy { + if in == nil { + return nil + } + out := new(ImageLookupPolicy) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ImageSignature) DeepCopyInto(out *ImageSignature) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + if in.Content != nil { + in, out := &in.Content, &out.Content + *out = make([]byte, len(*in)) + copy(*out, *in) + } + if in.Conditions != nil { + in, out := &in.Conditions, &out.Conditions + *out = make([]SignatureCondition, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.SignedClaims != nil { + in, out := &in.SignedClaims, &out.SignedClaims + *out = make(map[string]string, len(*in)) + for key, val := range *in { + (*out)[key] = val + } + } + if in.Created != nil { + in, out := &in.Created, &out.Created + *out = (*in).DeepCopy() + } + if in.IssuedBy != nil { + in, out := &in.IssuedBy, &out.IssuedBy + *out = new(SignatureIssuer) + **out = **in + } + if in.IssuedTo != nil { + in, out := &in.IssuedTo, &out.IssuedTo + *out = new(SignatureSubject) + **out = **in + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ImageSignature. 
+func (in *ImageSignature) DeepCopy() *ImageSignature { + if in == nil { + return nil + } + out := new(ImageSignature) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *ImageSignature) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ImageStream) DeepCopyInto(out *ImageStream) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ImageStream. +func (in *ImageStream) DeepCopy() *ImageStream { + if in == nil { + return nil + } + out := new(ImageStream) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *ImageStream) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ImageStreamImage) DeepCopyInto(out *ImageStreamImage) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Image.DeepCopyInto(&out.Image) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ImageStreamImage. +func (in *ImageStreamImage) DeepCopy() *ImageStreamImage { + if in == nil { + return nil + } + out := new(ImageStreamImage) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *ImageStreamImage) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ImageStreamImport) DeepCopyInto(out *ImageStreamImport) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ImageStreamImport. +func (in *ImageStreamImport) DeepCopy() *ImageStreamImport { + if in == nil { + return nil + } + out := new(ImageStreamImport) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *ImageStreamImport) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ImageStreamImportSpec) DeepCopyInto(out *ImageStreamImportSpec) { + *out = *in + if in.Repository != nil { + in, out := &in.Repository, &out.Repository + *out = new(RepositoryImportSpec) + **out = **in + } + if in.Images != nil { + in, out := &in.Images, &out.Images + *out = make([]ImageImportSpec, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ImageStreamImportSpec. +func (in *ImageStreamImportSpec) DeepCopy() *ImageStreamImportSpec { + if in == nil { + return nil + } + out := new(ImageStreamImportSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ImageStreamImportStatus) DeepCopyInto(out *ImageStreamImportStatus) { + *out = *in + if in.Import != nil { + in, out := &in.Import, &out.Import + *out = new(ImageStream) + (*in).DeepCopyInto(*out) + } + if in.Repository != nil { + in, out := &in.Repository, &out.Repository + *out = new(RepositoryImportStatus) + (*in).DeepCopyInto(*out) + } + if in.Images != nil { + in, out := &in.Images, &out.Images + *out = make([]ImageImportStatus, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ImageStreamImportStatus. +func (in *ImageStreamImportStatus) DeepCopy() *ImageStreamImportStatus { + if in == nil { + return nil + } + out := new(ImageStreamImportStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ImageStreamLayers) DeepCopyInto(out *ImageStreamLayers) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + if in.Blobs != nil { + in, out := &in.Blobs, &out.Blobs + *out = make(map[string]ImageLayerData, len(*in)) + for key, val := range *in { + (*out)[key] = *val.DeepCopy() + } + } + if in.Images != nil { + in, out := &in.Images, &out.Images + *out = make(map[string]ImageBlobReferences, len(*in)) + for key, val := range *in { + (*out)[key] = *val.DeepCopy() + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ImageStreamLayers. +func (in *ImageStreamLayers) DeepCopy() *ImageStreamLayers { + if in == nil { + return nil + } + out := new(ImageStreamLayers) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *ImageStreamLayers) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ImageStreamList) DeepCopyInto(out *ImageStreamList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]ImageStream, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ImageStreamList. 
+func (in *ImageStreamList) DeepCopy() *ImageStreamList { + if in == nil { + return nil + } + out := new(ImageStreamList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *ImageStreamList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ImageStreamMapping) DeepCopyInto(out *ImageStreamMapping) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Image.DeepCopyInto(&out.Image) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ImageStreamMapping. +func (in *ImageStreamMapping) DeepCopy() *ImageStreamMapping { + if in == nil { + return nil + } + out := new(ImageStreamMapping) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *ImageStreamMapping) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ImageStreamSpec) DeepCopyInto(out *ImageStreamSpec) { + *out = *in + out.LookupPolicy = in.LookupPolicy + if in.Tags != nil { + in, out := &in.Tags, &out.Tags + *out = make([]TagReference, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ImageStreamSpec. +func (in *ImageStreamSpec) DeepCopy() *ImageStreamSpec { + if in == nil { + return nil + } + out := new(ImageStreamSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ImageStreamStatus) DeepCopyInto(out *ImageStreamStatus) { + *out = *in + if in.Tags != nil { + in, out := &in.Tags, &out.Tags + *out = make([]NamedTagEventList, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ImageStreamStatus. +func (in *ImageStreamStatus) DeepCopy() *ImageStreamStatus { + if in == nil { + return nil + } + out := new(ImageStreamStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ImageStreamTag) DeepCopyInto(out *ImageStreamTag) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + if in.Tag != nil { + in, out := &in.Tag, &out.Tag + *out = new(TagReference) + (*in).DeepCopyInto(*out) + } + out.LookupPolicy = in.LookupPolicy + if in.Conditions != nil { + in, out := &in.Conditions, &out.Conditions + *out = make([]TagEventCondition, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + in.Image.DeepCopyInto(&out.Image) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ImageStreamTag. 
+func (in *ImageStreamTag) DeepCopy() *ImageStreamTag { + if in == nil { + return nil + } + out := new(ImageStreamTag) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *ImageStreamTag) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ImageStreamTagList) DeepCopyInto(out *ImageStreamTagList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]ImageStreamTag, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ImageStreamTagList. +func (in *ImageStreamTagList) DeepCopy() *ImageStreamTagList { + if in == nil { + return nil + } + out := new(ImageStreamTagList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *ImageStreamTagList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ImageTag) DeepCopyInto(out *ImageTag) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + if in.Spec != nil { + in, out := &in.Spec, &out.Spec + *out = new(TagReference) + (*in).DeepCopyInto(*out) + } + if in.Status != nil { + in, out := &in.Status, &out.Status + *out = new(NamedTagEventList) + (*in).DeepCopyInto(*out) + } + if in.Image != nil { + in, out := &in.Image, &out.Image + *out = new(Image) + (*in).DeepCopyInto(*out) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ImageTag. +func (in *ImageTag) DeepCopy() *ImageTag { + if in == nil { + return nil + } + out := new(ImageTag) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *ImageTag) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ImageTagList) DeepCopyInto(out *ImageTagList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]ImageTag, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ImageTagList. +func (in *ImageTagList) DeepCopy() *ImageTagList { + if in == nil { + return nil + } + out := new(ImageTagList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *ImageTagList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *NamedTagEventList) DeepCopyInto(out *NamedTagEventList) { + *out = *in + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]TagEvent, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Conditions != nil { + in, out := &in.Conditions, &out.Conditions + *out = make([]TagEventCondition, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NamedTagEventList. +func (in *NamedTagEventList) DeepCopy() *NamedTagEventList { + if in == nil { + return nil + } + out := new(NamedTagEventList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *RepositoryImportSpec) DeepCopyInto(out *RepositoryImportSpec) { + *out = *in + out.From = in.From + out.ImportPolicy = in.ImportPolicy + out.ReferencePolicy = in.ReferencePolicy + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RepositoryImportSpec. +func (in *RepositoryImportSpec) DeepCopy() *RepositoryImportSpec { + if in == nil { + return nil + } + out := new(RepositoryImportSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *RepositoryImportStatus) DeepCopyInto(out *RepositoryImportStatus) { + *out = *in + in.Status.DeepCopyInto(&out.Status) + if in.Images != nil { + in, out := &in.Images, &out.Images + *out = make([]ImageImportStatus, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.AdditionalTags != nil { + in, out := &in.AdditionalTags, &out.AdditionalTags + *out = make([]string, len(*in)) + copy(*out, *in) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RepositoryImportStatus. +func (in *RepositoryImportStatus) DeepCopy() *RepositoryImportStatus { + if in == nil { + return nil + } + out := new(RepositoryImportStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SignatureCondition) DeepCopyInto(out *SignatureCondition) { + *out = *in + in.LastProbeTime.DeepCopyInto(&out.LastProbeTime) + in.LastTransitionTime.DeepCopyInto(&out.LastTransitionTime) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SignatureCondition. +func (in *SignatureCondition) DeepCopy() *SignatureCondition { + if in == nil { + return nil + } + out := new(SignatureCondition) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SignatureGenericEntity) DeepCopyInto(out *SignatureGenericEntity) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SignatureGenericEntity. +func (in *SignatureGenericEntity) DeepCopy() *SignatureGenericEntity { + if in == nil { + return nil + } + out := new(SignatureGenericEntity) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *SignatureIssuer) DeepCopyInto(out *SignatureIssuer) { + *out = *in + out.SignatureGenericEntity = in.SignatureGenericEntity + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SignatureIssuer. +func (in *SignatureIssuer) DeepCopy() *SignatureIssuer { + if in == nil { + return nil + } + out := new(SignatureIssuer) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SignatureSubject) DeepCopyInto(out *SignatureSubject) { + *out = *in + out.SignatureGenericEntity = in.SignatureGenericEntity + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SignatureSubject. +func (in *SignatureSubject) DeepCopy() *SignatureSubject { + if in == nil { + return nil + } + out := new(SignatureSubject) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *TagEvent) DeepCopyInto(out *TagEvent) { + *out = *in + in.Created.DeepCopyInto(&out.Created) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TagEvent. +func (in *TagEvent) DeepCopy() *TagEvent { + if in == nil { + return nil + } + out := new(TagEvent) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *TagEventCondition) DeepCopyInto(out *TagEventCondition) { + *out = *in + in.LastTransitionTime.DeepCopyInto(&out.LastTransitionTime) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TagEventCondition. +func (in *TagEventCondition) DeepCopy() *TagEventCondition { + if in == nil { + return nil + } + out := new(TagEventCondition) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *TagImportPolicy) DeepCopyInto(out *TagImportPolicy) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TagImportPolicy. +func (in *TagImportPolicy) DeepCopy() *TagImportPolicy { + if in == nil { + return nil + } + out := new(TagImportPolicy) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *TagReference) DeepCopyInto(out *TagReference) { + *out = *in + if in.Annotations != nil { + in, out := &in.Annotations, &out.Annotations + *out = make(map[string]string, len(*in)) + for key, val := range *in { + (*out)[key] = val + } + } + if in.From != nil { + in, out := &in.From, &out.From + *out = new(corev1.ObjectReference) + **out = **in + } + if in.Generation != nil { + in, out := &in.Generation, &out.Generation + *out = new(int64) + **out = **in + } + out.ImportPolicy = in.ImportPolicy + out.ReferencePolicy = in.ReferencePolicy + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TagReference. +func (in *TagReference) DeepCopy() *TagReference { + if in == nil { + return nil + } + out := new(TagReference) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
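An aside on the generated helpers above (not part of the vendored diff): DeepCopy exists so callers can safely mutate objects that came from shared caches, for example:

package main

import (
	"fmt"

	imagev1 "github.com/openshift/api/image/v1"
)

func main() {
	// Pretend this object came from a shared informer cache: mutating it in
	// place would corrupt the cache for every other consumer.
	cached := &imagev1.ImageStream{}
	cached.Annotations = map[string]string{"owner": "team-a"}

	// DeepCopy clones nested maps and slices, so edits stay local to the copy.
	mine := cached.DeepCopy()
	mine.Annotations["owner"] = "team-b"

	fmt.Println(cached.Annotations["owner"], mine.Annotations["owner"]) // team-a team-b
}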
+func (in *TagReferencePolicy) DeepCopyInto(out *TagReferencePolicy) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TagReferencePolicy. +func (in *TagReferencePolicy) DeepCopy() *TagReferencePolicy { + if in == nil { + return nil + } + out := new(TagReferencePolicy) + in.DeepCopyInto(out) + return out +} diff --git a/vendor/github.com/openshift/api/image/v1/zz_generated.swagger_doc_generated.go b/vendor/github.com/openshift/api/image/v1/zz_generated.swagger_doc_generated.go new file mode 100644 index 0000000000000000000000000000000000000000..a50ffbbc52472729b2d76172917b7eba82ace2fe --- /dev/null +++ b/vendor/github.com/openshift/api/image/v1/zz_generated.swagger_doc_generated.go @@ -0,0 +1,413 @@ +package v1 + +// This file contains a collection of methods that can be used from go-restful to +// generate Swagger API documentation for its models. Please read this PR for more +// information on the implementation: https://github.com/emicklei/go-restful/pull/215 +// +// TODOs are ignored from the parser (e.g. TODO(andronat):... || TODO:...) if and only if +// they are on one line! For multiple line or blocks that you want to ignore use ---. +// Any context after a --- is ignored. +// +// Those methods can be generated by using hack/update-swagger-docs.sh + +// AUTO-GENERATED FUNCTIONS START HERE +var map_DockerImageReference = map[string]string{ + "": "DockerImageReference points to a container image.", + "Registry": "Registry is the registry that contains the container image", + "Namespace": "Namespace is the namespace that contains the container image", + "Name": "Name is the name of the container image", + "Tag": "Tag is which tag of the container image is being referenced", + "ID": "ID is the identifier for the container image", +} + +func (DockerImageReference) SwaggerDoc() map[string]string { + return map_DockerImageReference +} + +var map_Image = map[string]string{ + "": "Image is an immutable representation of a container image and metadata at a point in time. Images are named by taking a hash of their contents (metadata and content) and any change in format, content, or metadata results in a new name. The images resource is primarily for use by cluster administrators and integrations like the cluster image registry - end users instead access images via the imagestreamtags or imagestreamimages resources. While image metadata is stored in the API, any integration that implements the container image registry API must provide its own storage for the raw manifest data, image config, and layer contents.", + "dockerImageReference": "DockerImageReference is the string that can be used to pull this image.", + "dockerImageMetadata": "DockerImageMetadata contains metadata about this image", + "dockerImageMetadataVersion": "DockerImageMetadataVersion conveys the version of the object, which if empty defaults to \"1.0\"", + "dockerImageManifest": "DockerImageManifest is the raw JSON of the manifest", + "dockerImageLayers": "DockerImageLayers represents the layers in the image. May not be set if the image does not define that data.", + "signatures": "Signatures holds all signatures of the image.", + "dockerImageSignatures": "DockerImageSignatures provides the signatures as opaque blobs. This is a part of manifest schema v1.", + "dockerImageManifestMediaType": "DockerImageManifestMediaType specifies the mediaType of manifest. 
This is a part of manifest schema v2.", + "dockerImageConfig": "DockerImageConfig is a JSON blob that the runtime uses to set up the container. This is a part of manifest schema v2.", +} + +func (Image) SwaggerDoc() map[string]string { + return map_Image +} + +var map_ImageBlobReferences = map[string]string{ + "": "ImageBlobReferences describes the blob references within an image.", + "imageMissing": "imageMissing is true if the image is referenced by the image stream but the image object has been deleted from the API by an administrator. When this field is set, layers and config fields may be empty and callers that depend on the image metadata should consider the image to be unavailable for download or viewing.", + "layers": "layers is the list of blobs that compose this image, from base layer to top layer. All layers referenced by this array will be defined in the blobs map. Some images may have zero layers.", + "config": "config, if set, is the blob that contains the image config. Some images do not have separate config blobs and this field will be set to nil if so.", +} + +func (ImageBlobReferences) SwaggerDoc() map[string]string { + return map_ImageBlobReferences +} + +var map_ImageImportSpec = map[string]string{ + "": "ImageImportSpec describes a request to import a specific image.", + "from": "From is the source of an image to import; only kind DockerImage is allowed", + "to": "To is a tag in the current image stream to assign the imported image to; if name is not specified, the default tag from from.name will be used", + "importPolicy": "ImportPolicy is the policy controlling how the image is imported", + "referencePolicy": "ReferencePolicy defines how other components should consume the image", + "includeManifest": "IncludeManifest determines if the manifest for each image is returned in the response", +} + +func (ImageImportSpec) SwaggerDoc() map[string]string { + return map_ImageImportSpec +} + +var map_ImageImportStatus = map[string]string{ + "": "ImageImportStatus describes the result of an image import.", + "status": "Status is the status of the image import, including errors encountered while retrieving the image", + "image": "Image is the metadata of that image, if the image was located", + "tag": "Tag is the tag this image was located under, if any", +} + +func (ImageImportStatus) SwaggerDoc() map[string]string { + return map_ImageImportStatus +} + +var map_ImageLayer = map[string]string{ + "": "ImageLayer represents a single layer of the image. Some images may have multiple layers. Some may have none.", + "name": "Name of the layer as defined by the underlying store.", + "size": "Size of the layer in bytes as defined by the underlying store.", + "mediaType": "MediaType of the referenced object.", +} + +func (ImageLayer) SwaggerDoc() map[string]string { + return map_ImageLayer +} + +var map_ImageLayerData = map[string]string{ + "": "ImageLayerData contains metadata about an image layer.", + "size": "Size of the layer in bytes as defined by the underlying store. This field is optional if the necessary information about size is not available.", + "mediaType": "MediaType of the referenced object.", +} + +func (ImageLayerData) SwaggerDoc() map[string]string { + return map_ImageLayerData +} + +var map_ImageList = map[string]string{ + "": "ImageList is a list of Image objects.", + "items": "Items is a list of images", +} + +func (ImageList) SwaggerDoc() map[string]string { + return map_ImageList +} + +var map_ImageLookupPolicy = map[string]string{ + "": "ImageLookupPolicy describes how an image stream can be used to override the image references used by pods, builds, and other resources in a namespace.", + "local": "local will change the docker short image references (like \"mysql\" or \"php:latest\") on objects in this namespace to the image ID whenever they match this image stream, instead of reaching out to a remote registry. The name will be fully qualified to an image ID if found. The tag's referencePolicy is taken into account on the replaced value. Only works within the current namespace.", +} + +func (ImageLookupPolicy) SwaggerDoc() map[string]string { + return map_ImageLookupPolicy +} + +var map_ImageSignature = map[string]string{ + "": "ImageSignature holds a signature of an image. It allows one to verify image identity and possibly other claims as long as the signature is trusted. Based on this information it is possible to restrict runnable images to those matching cluster-wide policy. Mandatory fields should be parsed by clients doing image verification. The others are parsed from the signature's content by the server. They serve an informative purpose only.", + "type": "Required: Describes a type of stored blob.", + "content": "Required: An opaque binary string which is an image's signature.", + "conditions": "Conditions represent the latest available observations of a signature's current state.", + "imageIdentity": "A human readable string representing image's identity. It could be a product name and version, or an image pull spec (e.g. \"registry.access.redhat.com/rhel7/rhel:7.2\").", + "signedClaims": "Contains claims from the signature.", + "created": "If specified, it is the time of signature's creation.", + "issuedBy": "If specified, it holds information about an issuer of signing certificate or key (a person or entity who signed the signing certificate or key).", + "issuedTo": "If specified, it holds information about a subject of signing certificate or key (a person or entity who signed the image).", +} + +func (ImageSignature) SwaggerDoc() map[string]string { + return map_ImageSignature +} + +var map_ImageStream = map[string]string{ + "": "An ImageStream stores a mapping of tags to images, metadata overrides that are applied when images are tagged in a stream, and an optional reference to a container image repository on a registry. Users typically update the spec.tags field to point to external images which are imported from container registries using credentials in your namespace with the pull secret type, or to existing image stream tags and images which are immediately accessible for tagging or pulling. The history of images applied to a tag is visible in the status.tags field and any user who can view an image stream is allowed to tag that image into their own image streams. Access to pull images from the integrated registry is granted by having the \"get imagestreams/layers\" permission on a given image stream. Users may remove a tag by deleting the imagestreamtag resource, which causes both spec and status for that tag to be removed. Image stream history is retained until an administrator runs the prune operation, which removes references that are no longer in use. To preserve a historical image, ensure there is a tag in spec pointing to that image by its digest.", + "spec": "Spec describes the desired state of this stream", + "status": "Status describes the current state of this stream", +} + +func (ImageStream) SwaggerDoc() map[string]string { + return map_ImageStream +} + +var map_ImageStreamImage = map[string]string{ + "": "ImageStreamImage represents an Image that is retrieved by image name from an ImageStream. User interfaces and regular users can use this resource to access the metadata details of a tagged image in the image stream history for viewing, since Image resources are not directly accessible to end users. A not found error will be returned if no such image is referenced by a tag within the ImageStream. Images are created when spec tags are set on an image stream that represent an image in an external registry, when pushing to the integrated registry, or when tagging an existing image from one image stream to another. The name of an image stream image is in the form \"<STREAM>@<DIGEST>\", where the digest is the content addressable identifier for the image (sha256:xxxxx...). You can use ImageStreamImages as the from.kind of an image stream spec tag to reference an image exactly. The only operation supported on the imagestreamimage endpoint is retrieving the image.", + "image": "Image associated with the ImageStream and image name.", +} + +func (ImageStreamImage) SwaggerDoc() map[string]string { + return map_ImageStreamImage +} + +var map_ImageStreamImport = map[string]string{ + "": "The image stream import resource provides an easy way for a user to find and import container images from other container image registries into the server. Individual images or an entire image repository may be imported, and users may choose to see the results of the import prior to tagging the resulting images into the specified image stream.\n\nThis API is intended for end-user tools that need to see the metadata of the image prior to import (for instance, to generate an application from it). Clients that know the desired image can continue to create spec.tags directly into their image streams.", + "spec": "Spec is a description of the images that the user wishes to import", + "status": "Status is the result of importing the image", +} + +func (ImageStreamImport) SwaggerDoc() map[string]string { + return map_ImageStreamImport +} + +var map_ImageStreamImportSpec = map[string]string{ + "": "ImageStreamImportSpec defines what images should be imported.", + "import": "Import indicates whether to perform an import - if so, the specified tags are set on the spec and status of the image stream defined by the type meta.", + "repository": "Repository is an optional import of an entire container image repository. A maximum limit on the number of tags imported this way is imposed by the server.", + "images": "Images are a list of individual images to import.", +} + +func (ImageStreamImportSpec) SwaggerDoc() map[string]string { + return map_ImageStreamImportSpec +} + +var map_ImageStreamImportStatus = map[string]string{ + "": "ImageStreamImportStatus contains information about the status of an image stream import.", + "import": "Import is the image stream that was successfully updated or created when 'to' was set.", + "repository": "Repository is set, if spec.repository was set, to the outcome of the import", + "images": "Images is set with the result of importing spec.images", +} + +func (ImageStreamImportStatus) SwaggerDoc() map[string]string { + return map_ImageStreamImportStatus +} + +var map_ImageStreamLayers = map[string]string{ + "": "ImageStreamLayers describes information about the layers referenced by images in this image stream.", + "blobs": "blobs is a map of blob name to metadata about the blob.", + "images": "images is a map between an image name and the names of the blobs and config that comprise the image.", +} + +func (ImageStreamLayers) SwaggerDoc() map[string]string { + return map_ImageStreamLayers +} + +var map_ImageStreamList = map[string]string{ + "": "ImageStreamList is a list of ImageStream objects.", + "items": "Items is a list of imageStreams", +} + +func (ImageStreamList) SwaggerDoc() map[string]string { + return map_ImageStreamList +} + +var map_ImageStreamMapping = map[string]string{ + "": "ImageStreamMapping represents a mapping from a single image stream tag to a container image as well as the reference to the container image stream the image came from. This resource is used by privileged integrators to create an image resource and to associate it with an image stream in the status tags field. Creating an ImageStreamMapping will allow any user who can view the image stream to tag or pull that image, so only create mappings where the user has proven they have access to the image contents directly. The only operation supported for this resource is create, and the metadata name and namespace should be set to the image stream containing the tag that should be updated.", + "image": "Image is a container image.", + "tag": "Tag is a string value this image can be located with inside the stream.", +} + +func (ImageStreamMapping) SwaggerDoc() map[string]string { + return map_ImageStreamMapping +} + +var map_ImageStreamSpec = map[string]string{ + "": "ImageStreamSpec represents options for ImageStreams.", + "lookupPolicy": "lookupPolicy controls how other resources reference images within this namespace.", + "dockerImageRepository": "dockerImageRepository is optional; if specified, this stream is backed by a container repository on this server. Deprecated: This field is deprecated as of v3.7 and will be removed in a future release. Specify the source for the tags to be imported in each tag via the spec.tags.from reference instead.", + "tags": "tags map arbitrary string values to specific image locators", +} + +func (ImageStreamSpec) SwaggerDoc() map[string]string { + return map_ImageStreamSpec +} + +var map_ImageStreamStatus = map[string]string{ + "": "ImageStreamStatus contains information about the state of this image stream.", + "dockerImageRepository": "DockerImageRepository represents the effective location this stream may be accessed at.
May be empty until the server determines where the repository is located", + "publicDockerImageRepository": "PublicDockerImageRepository represents the public location from where the image can be pulled outside the cluster. This field may be empty if the administrator has not exposed the integrated registry externally.", + "tags": "Tags are a historical record of images associated with each tag. The first entry in the TagEvent array is the currently tagged image.", +} + +func (ImageStreamStatus) SwaggerDoc() map[string]string { + return map_ImageStreamStatus +} + +var map_ImageStreamTag = map[string]string{ + "": "ImageStreamTag represents an Image that is retrieved by tag name from an ImageStream. Use this resource to interact with the tags and images in an image stream by tag, or to see the image details for a particular tag. The image associated with this resource is the most recently successfully tagged, imported, or pushed image (as described in the image stream status.tags.items list for this tag). If an import is in progress or has failed the previous image will be shown. Deleting an image stream tag clears both the status and spec fields of an image stream. If no image can be retrieved for a given tag, a not found error will be returned.", + "tag": "tag is the spec tag associated with this image stream tag, and it may be null if only pushes have occurred to this image stream.", + "generation": "generation is the current generation of the tagged image - if tag is provided and this value is not equal to the tag generation, a user has requested an import that has not completed, or conditions will be filled out indicating any error.", + "lookupPolicy": "lookupPolicy indicates whether this tag will handle image references in this namespace.", + "conditions": "conditions is an array of conditions that apply to the image stream tag.", + "image": "image associated with the ImageStream and tag.", +} + +func (ImageStreamTag) SwaggerDoc() map[string]string { + return map_ImageStreamTag +} + +var map_ImageStreamTagList = map[string]string{ + "": "ImageStreamTagList is a list of ImageStreamTag objects.", + "items": "Items is the list of image stream tags", +} + +func (ImageStreamTagList) SwaggerDoc() map[string]string { + return map_ImageStreamTagList +} + +var map_ImageTag = map[string]string{ + "": "ImageTag represents a single tag within an image stream and includes the spec, the status history, and the currently referenced image (if any) of the provided tag. This type replaces the ImageStreamTag by providing a full view of the tag. ImageTags are returned for every spec or status tag present on the image stream. If no tag exists in either form a not found error will be returned by the API. A create operation will succeed if no spec tag has already been defined and the spec field is set. Delete will remove both spec and status elements from the image stream.", + "spec": "spec is the spec tag associated with this image stream tag, and it may be null if only pushes have occurred to this image stream.", + "status": "status is the status tag details associated with this image stream tag, and it may be null if no push or import has been performed.", + "image": "image is the details of the most recent image stream status tag, and it may be null if import has not completed or an administrator has deleted the image object. To verify this is the most recent image, you must verify the generation of the most recent status.items entry matches the spec tag (if a spec tag is set). 
This field will not be set when listing image tags.", +} + +func (ImageTag) SwaggerDoc() map[string]string { + return map_ImageTag +} + +var map_ImageTagList = map[string]string{ + "": "ImageTagList is a list of ImageTag objects. When listing image tags, the image field is not populated. Tags are returned in alphabetical order by image stream and then tag.", + "items": "Items is the list of image stream tags", +} + +func (ImageTagList) SwaggerDoc() map[string]string { + return map_ImageTagList +} + +var map_NamedTagEventList = map[string]string{ + "": "NamedTagEventList relates a tag to its image history.", + "tag": "Tag is the tag for which the history is recorded", + "items": "Items is the list of tag events for this tag.", + "conditions": "Conditions is an array of conditions that apply to the tag event list.", +} + +func (NamedTagEventList) SwaggerDoc() map[string]string { + return map_NamedTagEventList +} + +var map_RepositoryImportSpec = map[string]string{ + "": "RepositoryImportSpec describes a request to import images from a container image repository.", + "from": "From is the source for the image repository to import; only kind DockerImage and a name of a container image repository is allowed", + "importPolicy": "ImportPolicy is the policy controlling how the image is imported", + "referencePolicy": "ReferencePolicy defines how other components should consume the image", + "includeManifest": "IncludeManifest determines if the manifest for each image is returned in the response", +} + +func (RepositoryImportSpec) SwaggerDoc() map[string]string { + return map_RepositoryImportSpec +} + +var map_RepositoryImportStatus = map[string]string{ + "": "RepositoryImportStatus describes the result of an image repository import", + "status": "Status reflects whether any failure occurred during import", + "images": "Images is a list of images successfully retrieved by the import of the repository.", + "additionalTags": "AdditionalTags are tags that exist in the repository but were not imported because a maximum limit of automatic imports was applied.", +} + +func (RepositoryImportStatus) SwaggerDoc() map[string]string { + return map_RepositoryImportStatus +} + +var map_SignatureCondition = map[string]string{ + "": "SignatureCondition describes an image signature condition of particular kind at particular probe time.", + "type": "Type of signature condition, Complete or Failed.", + "status": "Status of the condition, one of True, False, Unknown.", + "lastProbeTime": "Last time the condition was checked.", + "lastTransitionTime": "Last time the condition transitioned from one status to another.", + "reason": "(brief) reason for the condition's last transition.", + "message": "Human readable message indicating details about last transition.", +} + +func (SignatureCondition) SwaggerDoc() map[string]string { + return map_SignatureCondition +} + +var map_SignatureGenericEntity = map[string]string{ + "": "SignatureGenericEntity holds generic information about a person or entity who is an issuer or a subject of signing certificate or key.", + "organization": "Organization name.", + "commonName": "Common name (e.g. openshift-signing-service).", +} + +func (SignatureGenericEntity) SwaggerDoc() map[string]string { + return map_SignatureGenericEntity +} + +var map_SignatureIssuer = map[string]string{ + "": "SignatureIssuer holds information about an issuer of signing certificate or key.", +} + +func (SignatureIssuer) SwaggerDoc() map[string]string { + return map_SignatureIssuer +} + +var map_SignatureSubject = map[string]string{ + "": "SignatureSubject holds information about a person or entity who created the signature.", + "publicKeyID": "If present, it is a human readable key ID of the public key belonging to the subject used to verify image signature. It should contain at least the 64 lowest bits of the public key's fingerprint (e.g. 0x685ebe62bf278440).", +} + +func (SignatureSubject) SwaggerDoc() map[string]string { + return map_SignatureSubject +} + +var map_TagEvent = map[string]string{ + "": "TagEvent is used by ImageStreamStatus to keep a historical record of images associated with a tag.", + "created": "Created holds the time the TagEvent was created", + "dockerImageReference": "DockerImageReference is the string that can be used to pull this image", + "image": "Image is the image", + "generation": "Generation is the spec tag generation that resulted in this tag being updated", +} + +func (TagEvent) SwaggerDoc() map[string]string { + return map_TagEvent +} + +var map_TagEventCondition = map[string]string{ + "": "TagEventCondition contains condition information for a tag event.", + "type": "Type of tag event condition, currently only ImportSuccess", + "status": "Status of the condition, one of True, False, Unknown.", + "lastTransitionTime": "LastTransitionTime is the time the condition transitioned from one status to another.", + "reason": "Reason is a brief machine readable explanation for the condition's last transition.", + "message": "Message is a human readable description of the details about last transition, complementing reason.", + "generation": "Generation is the spec tag generation that this status corresponds to", +} + +func (TagEventCondition) SwaggerDoc() map[string]string { + return map_TagEventCondition +} + +var map_TagImportPolicy = map[string]string{ + "": "TagImportPolicy controls how images related to this tag will be imported.", + "insecure": "Insecure is true if the server may bypass certificate verification or connect directly over HTTP during image import.", + "scheduled": "Scheduled indicates to the server that this tag should be periodically checked to ensure it is up to date, and imported", +} + +func (TagImportPolicy) SwaggerDoc() map[string]string { + return map_TagImportPolicy +} + +var map_TagReference = map[string]string{ + "": "TagReference specifies optional annotations for images using this tag and an optional reference to an ImageStreamTag, ImageStreamImage, or DockerImage this tag should track.", + "name": "Name of the tag", + "annotations": "Optional; if specified, annotations that are applied to images retrieved via ImageStreamTags.", + "from": "Optional; if specified, a reference to another image that this tag should point to. Valid values are ImageStreamTag, ImageStreamImage, and DockerImage. ImageStreamTag references can only reference a tag within this same ImageStream.", + "reference": "Reference states if the tag will be imported. Default value is false, which means the tag will be imported.", + "generation": "Generation is a counter that tracks mutations to the spec tag (user intent).
When a tag reference is changed the generation is set to match the current stream generation (which is incremented every time spec is changed). Other processes in the system like the image importer observe that the generation of spec tag is newer than the generation recorded in the status and use that as a trigger to import the newest remote tag. To trigger a new import, clients may set this value to zero which will reset the generation to the latest stream generation. Legacy clients will send this value as nil which will be merged with the current tag generation.", + "importPolicy": "ImportPolicy is information that controls how images may be imported by the server.", + "referencePolicy": "ReferencePolicy defines how other components should consume the image.", +} + +func (TagReference) SwaggerDoc() map[string]string { + return map_TagReference +} + +var map_TagReferencePolicy = map[string]string{ + "": "TagReferencePolicy describes how pull-specs for images in this image stream tag are generated when image change triggers in deployment configs or builds are resolved. This allows the image stream author to control how images are accessed.", + "type": "Type determines how the image pull spec should be transformed when the image stream tag is used in deployment config triggers or new builds. The default value is `Source`, indicating the original location of the image should be used (if imported). The user may also specify `Local`, indicating that the pull spec should point to the integrated container image registry and leverage the registry's ability to proxy the pull to an upstream registry. `Local` allows the credentials used to pull this image to be managed from the image stream's namespace, so others on the platform can access a remote image but have no access to the remote secret. 
It also allows the image layers to be mirrored into the local registry so that the images can still be pulled even if the upstream registry is unavailable.",
+}
+
+func (TagReferencePolicy) SwaggerDoc() map[string]string {
+	return map_TagReferencePolicy
+}
+
+// AUTO-GENERATED FUNCTIONS END HERE
diff --git a/vendor/github.com/openshift/api/imageregistry/install.go b/vendor/github.com/openshift/api/imageregistry/install.go
new file mode 100644
index 0000000000000000000000000000000000000000..4536c8f40359944f7f35cd3162cdc70c94d597b1
--- /dev/null
+++ b/vendor/github.com/openshift/api/imageregistry/install.go
@@ -0,0 +1,26 @@
+package imageregistry
+
+import (
+	"k8s.io/apimachinery/pkg/runtime"
+	"k8s.io/apimachinery/pkg/runtime/schema"
+
+	imageregistryv1 "github.com/openshift/api/imageregistry/v1"
+)
+
+const (
+	GroupName = "imageregistry.operator.openshift.io"
+)
+
+var (
+	schemeBuilder = runtime.NewSchemeBuilder(imageregistryv1.Install)
+	// Install is a function which adds every version of this group to a scheme
+	Install = schemeBuilder.AddToScheme
+)
+
+func Resource(resource string) schema.GroupResource {
+	return schema.GroupResource{Group: GroupName, Resource: resource}
+}
+
+func Kind(kind string) schema.GroupKind {
+	return schema.GroupKind{Group: GroupName, Kind: kind}
+}
diff --git a/vendor/github.com/openshift/api/imageregistry/v1/00-crd.yaml b/vendor/github.com/openshift/api/imageregistry/v1/00-crd.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..801a20baec6e41fbec9fdb71b36f9306a1ccd407
--- /dev/null
+++ b/vendor/github.com/openshift/api/imageregistry/v1/00-crd.yaml
@@ -0,0 +1,617 @@
+apiVersion: apiextensions.k8s.io/v1beta1
+kind: CustomResourceDefinition
+metadata:
+  name: configs.imageregistry.operator.openshift.io
+spec:
+  group: imageregistry.operator.openshift.io
+  scope: Cluster
+  version: v1
+  names:
+    kind: Config
+    listKind: ConfigList
+    plural: configs
+    singular: config
+  preserveUnknownFields: false
+  subresources:
+    status: {}
+  validation:
+    openAPIV3Schema:
+      description: Config is the configuration object for a registry instance managed
+        by the registry operator
+      type: object
+      required:
+      - metadata
+      - spec
+      properties:
+        apiVersion:
+          description: 'APIVersion defines the versioned schema of this representation
+            of an object. Servers should convert recognized schemas to the latest
+            internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources'
+          type: string
+        kind:
+          description: 'Kind is a string value representing the REST resource this
+            object represents. Servers may infer this from the endpoint the client
+            submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds'
+          type: string
+        metadata:
+          type: object
+        spec:
+          description: ImageRegistrySpec defines the specs for the running registry.
+          type: object
+          required:
+          - logging
+          - managementState
+          - replicas
+          properties:
+            defaultRoute:
+              description: defaultRoute indicates whether an external facing route
+                for the registry should be created using the default generated hostname.
+              type: boolean
+            disableRedirect:
+              description: disableRedirect controls whether to route all data through
+                the Registry, rather than redirecting to the backend.
+              type: boolean
+            httpSecret:
+              description: httpSecret is the value needed by the registry to secure
+                uploads, generated by default.
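For orientation, a minimal sketch of how a consumer could use the install.go helper above to register the group into a scheme (hypothetical caller code, not part of the vendored tree):

package main

import (
	"fmt"

	"k8s.io/apimachinery/pkg/runtime"

	"github.com/openshift/api/imageregistry"
)

func main() {
	scheme := runtime.NewScheme()
	// Install adds every version of the imageregistry group (currently v1) to the scheme.
	if err := imageregistry.Install(scheme); err != nil {
		panic(err)
	}
	// Resource qualifies a plural resource name with the group.
	fmt.Println(imageregistry.Resource("configs")) // "configs.imageregistry.operator.openshift.io"
}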
+ type: string + logging: + description: logging determines the level of logging enabled in the + registry. + type: integer + format: int64 + managementState: + description: managementState indicates whether the registry instance + represented by this config instance is under operator management or + not. Valid values are Managed, Unmanaged, and Removed. + type: string + pattern: ^(Managed|Unmanaged|Force|Removed)$ + nodeSelector: + description: nodeSelector defines the node selection constraints for + the registry pod. + type: object + additionalProperties: + type: string + proxy: + description: proxy defines the proxy to be used when calling master + api, upstream registries, etc. + type: object + properties: + http: + description: http defines the proxy to be used by the image registry + when accessing HTTP endpoints. + type: string + https: + description: https defines the proxy to be used by the image registry + when accessing HTTPS endpoints. + type: string + noProxy: + description: noProxy defines a comma-separated list of host names + that shouldn't go through any proxy. + type: string + readOnly: + description: readOnly indicates whether the registry instance should + reject attempts to push new images or delete existing ones. + type: boolean + replicas: + description: replicas determines the number of registry instances to + run. + type: integer + format: int32 + requests: + description: requests controls how many parallel requests a given registry + instance will handle before queuing additional requests. + type: object + properties: + read: + description: read defines limits for image registry's reads. + type: object + properties: + maxInQueue: + description: maxInQueue sets the maximum queued api requests + to the registry. + type: integer + maxRunning: + description: maxRunning sets the maximum in flight api requests + to the registry. + type: integer + maxWaitInQueue: + description: maxWaitInQueue sets the maximum time a request + can wait in the queue before being rejected. + type: string + write: + description: write defines limits for image registry's writes. + type: object + properties: + maxInQueue: + description: maxInQueue sets the maximum queued api requests + to the registry. + type: integer + maxRunning: + description: maxRunning sets the maximum in flight api requests + to the registry. + type: integer + maxWaitInQueue: + description: maxWaitInQueue sets the maximum time a request + can wait in the queue before being rejected. + type: string + resources: + description: resources defines the resource requests+limits for the + registry pod. + type: object + properties: + limits: + description: 'Limits describes the maximum amount of compute resources + allowed. More info: https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/' + type: object + additionalProperties: + type: string + requests: + description: 'Requests describes the minimum amount of compute resources + required. If Requests is omitted for a container, it defaults + to Limits if that is explicitly specified, otherwise to an implementation-defined + value. More info: https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/' + type: object + additionalProperties: + type: string + rolloutStrategy: + description: rolloutStrategy defines rollout strategy for the image + registry deployment. 
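The read/write limits under spec.requests above correspond to the ImageRegistryConfigRequests types added later in this diff; a hedged sketch with illustrative values only (requests beyond maxRunning queue up to maxInQueue deep and are rejected once they have waited maxWaitInQueue):

package example

import (
	"time"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"

	imageregistryv1 "github.com/openshift/api/imageregistry/v1"
)

var requests = imageregistryv1.ImageRegistryConfigRequests{
	Read: imageregistryv1.ImageRegistryConfigRequestsLimits{
		MaxRunning:     10, // at most 10 in-flight read requests
		MaxInQueue:     20, // queue up to 20 more
		MaxWaitInQueue: metav1.Duration{Duration: 2 * time.Minute}, // reject after 2m in queue
	},
	Write: imageregistryv1.ImageRegistryConfigRequestsLimits{
		MaxRunning:     5,
		MaxInQueue:     10,
		MaxWaitInQueue: metav1.Duration{Duration: time.Minute},
	},
}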
+ type: string + pattern: ^(RollingUpdate|Recreate)$ + routes: + description: routes defines additional external facing routes which + should be created for the registry. + type: array + items: + description: ImageRegistryConfigRoute holds information on external + route access to image registry. + type: object + required: + - name + properties: + hostname: + description: hostname for the route. + type: string + name: + description: name of the route to be created. + type: string + secretName: + description: secretName points to secret containing the certificates + to be used by the route. + type: string + storage: + description: storage details for configuring registry storage, e.g. + S3 bucket coordinates. + type: object + properties: + azure: + description: azure represents configuration that uses Azure Blob + Storage. + type: object + properties: + accountName: + description: accountName defines the account to be used by the + registry. + type: string + container: + description: container defines Azure's container to be used + by registry. + type: string + maxLength: 63 + minLength: 3 + pattern: ^[0-9a-z]+(-[0-9a-z]+)*$ + emptyDir: + description: 'emptyDir represents ephemeral storage on the pod''s + host node. WARNING: this storage cannot be used with more than + 1 replica and is not suitable for production use. When the pod + is removed from a node for any reason, the data in the emptyDir + is deleted forever.' + type: object + gcs: + description: gcs represents configuration that uses Google Cloud + Storage. + type: object + properties: + bucket: + description: bucket is the bucket name in which you want to + store the registry's data. Optional, will be generated if + not provided. + type: string + keyID: + description: keyID is the KMS key ID to use for encryption. + Optional, buckets are encrypted by default on GCP. This allows + for the use of a custom encryption key. + type: string + projectID: + description: projectID is the Project ID of the GCP project + that this bucket should be associated with. + type: string + region: + description: region is the GCS location in which your bucket + exists. Optional, will be set based on the installed GCS Region. + type: string + pvc: + description: pvc represents configuration that uses a PersistentVolumeClaim. + type: object + properties: + claim: + description: claim defines the Persisent Volume Claim's name + to be used. + type: string + s3: + description: s3 represents configuration that uses Amazon Simple + Storage Service. + type: object + properties: + bucket: + description: bucket is the bucket name in which you want to + store the registry's data. Optional, will be generated if + not provided. + type: string + cloudFront: + description: cloudFront configures Amazon Cloudfront as the + storage middleware in a registry. + type: object + required: + - baseURL + - keypairID + - privateKey + properties: + baseURL: + description: baseURL contains the SCHEME://HOST[/PATH] at + which Cloudfront is served. + type: string + duration: + description: duration is the duration of the Cloudfront + session. + type: string + keypairID: + description: keypairID is key pair ID provided by AWS. + type: string + privateKey: + description: privateKey points to secret containing the + private key, provided by AWS. + type: object + required: + - key + properties: + key: + description: The key of the secret to select from. Must + be a valid secret key. + type: string + name: + description: 'Name of the referent. 
More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, uid?' + type: string + optional: + description: Specify whether the Secret or its key must + be defined + type: boolean + encrypt: + description: encrypt specifies whether the registry stores the + image in encrypted format or not. Optional, defaults to false. + type: boolean + keyID: + description: keyID is the KMS key ID to use for encryption. + Optional, Encrypt must be true, or this parameter is ignored. + type: string + region: + description: region is the AWS region in which your bucket exists. + Optional, will be set based on the installed AWS Region. + type: string + regionEndpoint: + description: regionEndpoint is the endpoint for S3 compatible + storage services. Optional, defaults based on the Region that + is provided. + type: string + swift: + description: swift represents configuration that uses OpenStack + Object Storage. + type: object + properties: + authURL: + description: authURL defines the URL for obtaining an authentication + token. + type: string + authVersion: + description: authVersion specifies the OpenStack Auth's version. + type: string + container: + description: container defines the name of Swift container where + to store the registry's data. + type: string + domain: + description: domain specifies Openstack's domain name for Identity + v3 API. + type: string + domainID: + description: domainID specifies Openstack's domain id for Identity + v3 API. + type: string + regionName: + description: regionName defines Openstack's region in which + container exists. + type: string + tenant: + description: tenant defines Openstack tenant name to be used + by registry. + type: string + tenantID: + description: tenant defines Openstack tenant id to be used by + registry. + type: string + tolerations: + description: tolerations defines the tolerations for the registry pod. + type: array + items: + description: The pod this Toleration is attached to tolerates any + taint that matches the triple <key,value,effect> using the matching + operator <operator>. + type: object + properties: + effect: + description: Effect indicates the taint effect to match. Empty + means match all taint effects. When specified, allowed values + are NoSchedule, PreferNoSchedule and NoExecute. + type: string + key: + description: Key is the taint key that the toleration applies + to. Empty means match all taint keys. If the key is empty, operator + must be Exists; this combination means to match all values and + all keys. + type: string + operator: + description: Operator represents a key's relationship to the value. + Valid operators are Exists and Equal. Defaults to Equal. Exists + is equivalent to wildcard for value, so that a pod can tolerate + all taints of a particular category. + type: string + tolerationSeconds: + description: TolerationSeconds represents the period of time the + toleration (which must be of effect NoExecute, otherwise this + field is ignored) tolerates the taint. By default, it is not + set, which means tolerate the taint forever (do not evict). + Zero and negative values will be treated as 0 (evict immediately) + by the system. + type: integer + format: int64 + value: + description: Value is the taint value the toleration matches to. + If the operator is Exists, the value should be empty, otherwise + just a regular string. + type: string + status: + description: ImageRegistryStatus reports image registry operational status. 
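Tying the storage schema above to the Go types vendored later in this hunk, a sketch of a Config that selects the S3 backend; all names and values are placeholders, including the object name "cluster":

package example

import (
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"

	imageregistryv1 "github.com/openshift/api/imageregistry/v1"
	operatorv1 "github.com/openshift/api/operator/v1"
)

// Sets the three required spec fields (managementState, replicas, logging)
// plus one storage backend.
var cfg = &imageregistryv1.Config{
	ObjectMeta: metav1.ObjectMeta{Name: "cluster"}, // illustrative; the resource is cluster-scoped
	Spec: imageregistryv1.ImageRegistrySpec{
		ManagementState: operatorv1.Managed,
		Replicas:        2,
		LogLevel:        2, // serialized as "logging" per the JSON tag
		Storage: imageregistryv1.ImageRegistryConfigStorage{
			S3: &imageregistryv1.ImageRegistryConfigStorageS3{
				Bucket: "example-registry-bucket", // placeholder
				Region: "us-east-1",               // placeholder
			},
		},
	},
}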
+ type: object + required: + - storage + - storageManaged + properties: + conditions: + description: conditions is a list of conditions and their status + type: array + items: + description: OperatorCondition is just the standard condition fields. + type: object + properties: + lastTransitionTime: + type: string + format: date-time + message: + type: string + reason: + type: string + status: + type: string + type: + type: string + generations: + description: generations are used to determine when an item needs to + be reconciled or has changed in a way that needs a reaction. + type: array + items: + description: GenerationStatus keeps track of the generation for a + given resource so that decisions about forced updates can be made. + type: object + properties: + group: + description: group is the group of the thing you're tracking + type: string + hash: + description: hash is an optional field set for resources without + generation that are content sensitive like secrets and configmaps + type: string + lastGeneration: + description: lastGeneration is the last generation of the workload + controller involved + type: integer + format: int64 + name: + description: name is the name of the thing you're tracking + type: string + namespace: + description: namespace is where the thing you're tracking is + type: string + resource: + description: resource is the resource type of the thing you're + tracking + type: string + observedGeneration: + description: observedGeneration is the last generation change you've + dealt with + type: integer + format: int64 + readyReplicas: + description: readyReplicas indicates how many replicas are ready and + at the desired state + type: integer + format: int32 + storage: + description: storage indicates the current applied storage configuration + of the registry. + type: object + properties: + azure: + description: azure represents configuration that uses Azure Blob + Storage. + type: object + properties: + accountName: + description: accountName defines the account to be used by the + registry. + type: string + container: + description: container defines Azure's container to be used + by registry. + type: string + maxLength: 63 + minLength: 3 + pattern: ^[0-9a-z]+(-[0-9a-z]+)*$ + emptyDir: + description: 'emptyDir represents ephemeral storage on the pod''s + host node. WARNING: this storage cannot be used with more than + 1 replica and is not suitable for production use. When the pod + is removed from a node for any reason, the data in the emptyDir + is deleted forever.' + type: object + gcs: + description: gcs represents configuration that uses Google Cloud + Storage. + type: object + properties: + bucket: + description: bucket is the bucket name in which you want to + store the registry's data. Optional, will be generated if + not provided. + type: string + keyID: + description: keyID is the KMS key ID to use for encryption. + Optional, buckets are encrypted by default on GCP. This allows + for the use of a custom encryption key. + type: string + projectID: + description: projectID is the Project ID of the GCP project + that this bucket should be associated with. + type: string + region: + description: region is the GCS location in which your bucket + exists. Optional, will be set based on the installed GCS Region. + type: string + pvc: + description: pvc represents configuration that uses a PersistentVolumeClaim. + type: object + properties: + claim: + description: claim defines the Persisent Volume Claim's name + to be used. 
+ type: string + s3: + description: s3 represents configuration that uses Amazon Simple + Storage Service. + type: object + properties: + bucket: + description: bucket is the bucket name in which you want to + store the registry's data. Optional, will be generated if + not provided. + type: string + cloudFront: + description: cloudFront configures Amazon Cloudfront as the + storage middleware in a registry. + type: object + required: + - baseURL + - keypairID + - privateKey + properties: + baseURL: + description: baseURL contains the SCHEME://HOST[/PATH] at + which Cloudfront is served. + type: string + duration: + description: duration is the duration of the Cloudfront + session. + type: string + keypairID: + description: keypairID is key pair ID provided by AWS. + type: string + privateKey: + description: privateKey points to secret containing the + private key, provided by AWS. + type: object + required: + - key + properties: + key: + description: The key of the secret to select from. Must + be a valid secret key. + type: string + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, uid?' + type: string + optional: + description: Specify whether the Secret or its key must + be defined + type: boolean + encrypt: + description: encrypt specifies whether the registry stores the + image in encrypted format or not. Optional, defaults to false. + type: boolean + keyID: + description: keyID is the KMS key ID to use for encryption. + Optional, Encrypt must be true, or this parameter is ignored. + type: string + region: + description: region is the AWS region in which your bucket exists. + Optional, will be set based on the installed AWS Region. + type: string + regionEndpoint: + description: regionEndpoint is the endpoint for S3 compatible + storage services. Optional, defaults based on the Region that + is provided. + type: string + swift: + description: swift represents configuration that uses OpenStack + Object Storage. + type: object + properties: + authURL: + description: authURL defines the URL for obtaining an authentication + token. + type: string + authVersion: + description: authVersion specifies the OpenStack Auth's version. + type: string + container: + description: container defines the name of Swift container where + to store the registry's data. + type: string + domain: + description: domain specifies Openstack's domain name for Identity + v3 API. + type: string + domainID: + description: domainID specifies Openstack's domain id for Identity + v3 API. + type: string + regionName: + description: regionName defines Openstack's region in which + container exists. + type: string + tenant: + description: tenant defines Openstack tenant name to be used + by registry. + type: string + tenantID: + description: tenant defines Openstack tenant id to be used by + registry. + type: string + storageManaged: + description: storageManaged is a boolean which denotes whether or not + we created the registry storage medium (such as an S3 bucket). 
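Because storageManaged records whether the operator itself provisioned the storage medium, a consumer might branch on it before any teardown; a brief hedged sketch, with cfg as in the earlier Config example:

if cfg.Status.StorageManaged {
	// The operator created the storage (e.g. an S3 bucket) and reconciles it;
	// external tooling should not delete or repurpose it.
}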
+ type: boolean + version: + description: version is the level this availability applies to + type: string diff --git a/vendor/github.com/openshift/api/imageregistry/v1/01-crd.yaml b/vendor/github.com/openshift/api/imageregistry/v1/01-crd.yaml new file mode 100644 index 0000000000000000000000000000000000000000..588eb64cc9c68299e6008f241a8ee208f01331f0 --- /dev/null +++ b/vendor/github.com/openshift/api/imageregistry/v1/01-crd.yaml @@ -0,0 +1,736 @@ +apiVersion: apiextensions.k8s.io/v1beta1 +kind: CustomResourceDefinition +metadata: + name: imagepruners.imageregistry.operator.openshift.io +spec: + group: imageregistry.operator.openshift.io + scope: Cluster + version: v1 + preserveUnknownFields: false + subresources: + status: {} + names: + kind: ImagePruner + listKind: ImagePrunerList + plural: imagepruners + singular: imagepruner + "validation": + "openAPIV3Schema": + description: ImagePruner is the configuration object for an image registry pruner + managed by the registry operator. + type: object + required: + - metadata + - spec + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation + of an object. Servers should convert recognized schemas to the latest + internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource this + object represents. Servers may infer this from the endpoint the client + submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + metadata: + type: object + spec: + description: ImagePrunerSpec defines the specs for the running image pruner. + type: object + properties: + affinity: + description: affinity is a group of node affinity scheduling rules for + the image pruner pod. + type: object + properties: + nodeAffinity: + description: Describes node affinity scheduling rules for the pod. + type: object + properties: + preferredDuringSchedulingIgnoredDuringExecution: + description: The scheduler will prefer to schedule pods to nodes + that satisfy the affinity expressions specified by this field, + but it may choose a node that violates one or more of the + expressions. The node that is most preferred is the one with + the greatest sum of weights, i.e. for each node that meets + all of the scheduling requirements (resource request, requiredDuringScheduling + affinity expressions, etc.), compute a sum by iterating through + the elements of this field and adding "weight" to the sum + if the node matches the corresponding matchExpressions; the + node(s) with the highest sum are the most preferred. + type: array + items: + description: An empty preferred scheduling term matches all + objects with implicit weight 0 (i.e. it's a no-op). A null + preferred scheduling term matches no objects (i.e. is also + a no-op). + type: object + required: + - preference + - weight + properties: + preference: + description: A node selector term, associated with the + corresponding weight. + type: object + properties: + matchExpressions: + description: A list of node selector requirements + by node's labels. + type: array + items: + description: A node selector requirement is a selector + that contains values, a key, and an operator that + relates the key and values. 
+ type: object + required: + - key + - operator + properties: + key: + description: The label key that the selector + applies to. + type: string + operator: + description: Represents a key's relationship + to a set of values. Valid operators are In, + NotIn, Exists, DoesNotExist. Gt, and Lt. + type: string + values: + description: An array of string values. If the + operator is In or NotIn, the values array + must be non-empty. If the operator is Exists + or DoesNotExist, the values array must be + empty. If the operator is Gt or Lt, the values + array must have a single element, which will + be interpreted as an integer. This array is + replaced during a strategic merge patch. + type: array + items: + type: string + matchFields: + description: A list of node selector requirements + by node's fields. + type: array + items: + description: A node selector requirement is a selector + that contains values, a key, and an operator that + relates the key and values. + type: object + required: + - key + - operator + properties: + key: + description: The label key that the selector + applies to. + type: string + operator: + description: Represents a key's relationship + to a set of values. Valid operators are In, + NotIn, Exists, DoesNotExist. Gt, and Lt. + type: string + values: + description: An array of string values. If the + operator is In or NotIn, the values array + must be non-empty. If the operator is Exists + or DoesNotExist, the values array must be + empty. If the operator is Gt or Lt, the values + array must have a single element, which will + be interpreted as an integer. This array is + replaced during a strategic merge patch. + type: array + items: + type: string + weight: + description: Weight associated with matching the corresponding + nodeSelectorTerm, in the range 1-100. + type: integer + format: int32 + requiredDuringSchedulingIgnoredDuringExecution: + description: If the affinity requirements specified by this + field are not met at scheduling time, the pod will not be + scheduled onto the node. If the affinity requirements specified + by this field cease to be met at some point during pod execution + (e.g. due to an update), the system may or may not try to + eventually evict the pod from its node. + type: object + required: + - nodeSelectorTerms + properties: + nodeSelectorTerms: + description: Required. A list of node selector terms. The + terms are ORed. + type: array + items: + description: A null or empty node selector term matches + no objects. The requirements of them are ANDed. The + TopologySelectorTerm type implements a subset of the + NodeSelectorTerm. + type: object + properties: + matchExpressions: + description: A list of node selector requirements + by node's labels. + type: array + items: + description: A node selector requirement is a selector + that contains values, a key, and an operator that + relates the key and values. + type: object + required: + - key + - operator + properties: + key: + description: The label key that the selector + applies to. + type: string + operator: + description: Represents a key's relationship + to a set of values. Valid operators are In, + NotIn, Exists, DoesNotExist. Gt, and Lt. + type: string + values: + description: An array of string values. If the + operator is In or NotIn, the values array + must be non-empty. If the operator is Exists + or DoesNotExist, the values array must be + empty. If the operator is Gt or Lt, the values + array must have a single element, which will + be interpreted as an integer. 
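The Gt/Lt semantics above (exactly one value, parsed as an integer) are easy to get wrong; a small sketch using the upstream corev1 types, with a hypothetical label key:

package example

import corev1 "k8s.io/api/core/v1"

// Matches nodes whose example.com/gpu-count label, parsed as an integer, is greater than 4.
var req = corev1.NodeSelectorRequirement{
	Key:      "example.com/gpu-count", // hypothetical node label
	Operator: corev1.NodeSelectorOpGt,
	Values:   []string{"4"}, // Gt/Lt require exactly one element
}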
This array is + replaced during a strategic merge patch. + type: array + items: + type: string + matchFields: + description: A list of node selector requirements + by node's fields. + type: array + items: + description: A node selector requirement is a selector + that contains values, a key, and an operator that + relates the key and values. + type: object + required: + - key + - operator + properties: + key: + description: The label key that the selector + applies to. + type: string + operator: + description: Represents a key's relationship + to a set of values. Valid operators are In, + NotIn, Exists, DoesNotExist. Gt, and Lt. + type: string + values: + description: An array of string values. If the + operator is In or NotIn, the values array + must be non-empty. If the operator is Exists + or DoesNotExist, the values array must be + empty. If the operator is Gt or Lt, the values + array must have a single element, which will + be interpreted as an integer. This array is + replaced during a strategic merge patch. + type: array + items: + type: string + podAffinity: + description: Describes pod affinity scheduling rules (e.g. co-locate + this pod in the same node, zone, etc. as some other pod(s)). + type: object + properties: + preferredDuringSchedulingIgnoredDuringExecution: + description: The scheduler will prefer to schedule pods to nodes + that satisfy the affinity expressions specified by this field, + but it may choose a node that violates one or more of the + expressions. The node that is most preferred is the one with + the greatest sum of weights, i.e. for each node that meets + all of the scheduling requirements (resource request, requiredDuringScheduling + affinity expressions, etc.), compute a sum by iterating through + the elements of this field and adding "weight" to the sum + if the node has pods which matches the corresponding podAffinityTerm; + the node(s) with the highest sum are the most preferred. + type: array + items: + description: The weights of all of the matched WeightedPodAffinityTerm + fields are added per-node to find the most preferred node(s) + type: object + required: + - podAffinityTerm + - weight + properties: + podAffinityTerm: + description: Required. A pod affinity term, associated + with the corresponding weight. + type: object + required: + - topologyKey + properties: + labelSelector: + description: A label query over a set of resources, + in this case pods. + type: object + properties: + matchExpressions: + description: matchExpressions is a list of label + selector requirements. The requirements are + ANDed. + type: array + items: + description: A label selector requirement is + a selector that contains values, a key, and + an operator that relates the key and values. + type: object + required: + - key + - operator + properties: + key: + description: key is the label key that the + selector applies to. + type: string + operator: + description: operator represents a key's + relationship to a set of values. Valid + operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: values is an array of string + values. If the operator is In or NotIn, + the values array must be non-empty. If + the operator is Exists or DoesNotExist, + the values array must be empty. This array + is replaced during a strategic merge patch. + type: array + items: + type: string + matchLabels: + description: matchLabels is a map of {key,value} + pairs. 
A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, + whose key field is "key", the operator is "In", + and the values array contains only "value". + The requirements are ANDed. + type: object + additionalProperties: + type: string + namespaces: + description: namespaces specifies which namespaces + the labelSelector applies to (matches against); + null or empty list means "this pod's namespace" + type: array + items: + type: string + topologyKey: + description: This pod should be co-located (affinity) + or not co-located (anti-affinity) with the pods + matching the labelSelector in the specified namespaces, + where co-located is defined as running on a node + whose value of the label with key topologyKey matches + that of any node on which any of the selected pods + is running. Empty topologyKey is not allowed. + type: string + weight: + description: weight associated with matching the corresponding + podAffinityTerm, in the range 1-100. + type: integer + format: int32 + requiredDuringSchedulingIgnoredDuringExecution: + description: If the affinity requirements specified by this + field are not met at scheduling time, the pod will not be + scheduled onto the node. If the affinity requirements specified + by this field cease to be met at some point during pod execution + (e.g. due to a pod label update), the system may or may not + try to eventually evict the pod from its node. When there + are multiple elements, the lists of nodes corresponding to + each podAffinityTerm are intersected, i.e. all terms must + be satisfied. + type: array + items: + description: Defines a set of pods (namely those matching + the labelSelector relative to the given namespace(s)) that + this pod should be co-located (affinity) or not co-located + (anti-affinity) with, where co-located is defined as running + on a node whose value of the label with key <topologyKey> + matches that of any node on which a pod of the set of pods + is running + type: object + required: + - topologyKey + properties: + labelSelector: + description: A label query over a set of resources, in + this case pods. + type: object + properties: + matchExpressions: + description: matchExpressions is a list of label selector + requirements. The requirements are ANDed. + type: array + items: + description: A label selector requirement is a selector + that contains values, a key, and an operator that + relates the key and values. + type: object + required: + - key + - operator + properties: + key: + description: key is the label key that the selector + applies to. + type: string + operator: + description: operator represents a key's relationship + to a set of values. Valid operators are In, + NotIn, Exists and DoesNotExist. + type: string + values: + description: values is an array of string values. + If the operator is In or NotIn, the values + array must be non-empty. If the operator is + Exists or DoesNotExist, the values array must + be empty. This array is replaced during a + strategic merge patch. + type: array + items: + type: string + matchLabels: + description: matchLabels is a map of {key,value} pairs. + A single {key,value} in the matchLabels map is equivalent + to an element of matchExpressions, whose key field + is "key", the operator is "In", and the values array + contains only "value". The requirements are ANDed. 
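The matchLabels/matchExpressions equivalence described above can be made concrete; the two selectors below (label values are illustrative) select exactly the same pods:

package example

import metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"

// Shorthand form.
var byLabels = metav1.LabelSelector{
	MatchLabels: map[string]string{"app": "registry"},
}

// Expanded form: key "app", operator In, values ["registry"].
var byExpressions = metav1.LabelSelector{
	MatchExpressions: []metav1.LabelSelectorRequirement{{
		Key:      "app",
		Operator: metav1.LabelSelectorOpIn,
		Values:   []string{"registry"},
	}},
}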
+ type: object + additionalProperties: + type: string + namespaces: + description: namespaces specifies which namespaces the + labelSelector applies to (matches against); null or + empty list means "this pod's namespace" + type: array + items: + type: string + topologyKey: + description: This pod should be co-located (affinity) + or not co-located (anti-affinity) with the pods matching + the labelSelector in the specified namespaces, where + co-located is defined as running on a node whose value + of the label with key topologyKey matches that of any + node on which any of the selected pods is running. Empty + topologyKey is not allowed. + type: string + podAntiAffinity: + description: Describes pod anti-affinity scheduling rules (e.g. + avoid putting this pod in the same node, zone, etc. as some other + pod(s)). + type: object + properties: + preferredDuringSchedulingIgnoredDuringExecution: + description: The scheduler will prefer to schedule pods to nodes + that satisfy the anti-affinity expressions specified by this + field, but it may choose a node that violates one or more + of the expressions. The node that is most preferred is the + one with the greatest sum of weights, i.e. for each node that + meets all of the scheduling requirements (resource request, + requiredDuringScheduling anti-affinity expressions, etc.), + compute a sum by iterating through the elements of this field + and adding "weight" to the sum if the node has pods which + matches the corresponding podAffinityTerm; the node(s) with + the highest sum are the most preferred. + type: array + items: + description: The weights of all of the matched WeightedPodAffinityTerm + fields are added per-node to find the most preferred node(s) + type: object + required: + - podAffinityTerm + - weight + properties: + podAffinityTerm: + description: Required. A pod affinity term, associated + with the corresponding weight. + type: object + required: + - topologyKey + properties: + labelSelector: + description: A label query over a set of resources, + in this case pods. + type: object + properties: + matchExpressions: + description: matchExpressions is a list of label + selector requirements. The requirements are + ANDed. + type: array + items: + description: A label selector requirement is + a selector that contains values, a key, and + an operator that relates the key and values. + type: object + required: + - key + - operator + properties: + key: + description: key is the label key that the + selector applies to. + type: string + operator: + description: operator represents a key's + relationship to a set of values. Valid + operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: values is an array of string + values. If the operator is In or NotIn, + the values array must be non-empty. If + the operator is Exists or DoesNotExist, + the values array must be empty. This array + is replaced during a strategic merge patch. + type: array + items: + type: string + matchLabels: + description: matchLabels is a map of {key,value} + pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, + whose key field is "key", the operator is "In", + and the values array contains only "value". + The requirements are ANDed. 
+ type: object + additionalProperties: + type: string + namespaces: + description: namespaces specifies which namespaces + the labelSelector applies to (matches against); + null or empty list means "this pod's namespace" + type: array + items: + type: string + topologyKey: + description: This pod should be co-located (affinity) + or not co-located (anti-affinity) with the pods + matching the labelSelector in the specified namespaces, + where co-located is defined as running on a node + whose value of the label with key topologyKey matches + that of any node on which any of the selected pods + is running. Empty topologyKey is not allowed. + type: string + weight: + description: weight associated with matching the corresponding + podAffinityTerm, in the range 1-100. + type: integer + format: int32 + requiredDuringSchedulingIgnoredDuringExecution: + description: If the anti-affinity requirements specified by + this field are not met at scheduling time, the pod will not + be scheduled onto the node. If the anti-affinity requirements + specified by this field cease to be met at some point during + pod execution (e.g. due to a pod label update), the system + may or may not try to eventually evict the pod from its node. + When there are multiple elements, the lists of nodes corresponding + to each podAffinityTerm are intersected, i.e. all terms must + be satisfied. + type: array + items: + description: Defines a set of pods (namely those matching + the labelSelector relative to the given namespace(s)) that + this pod should be co-located (affinity) or not co-located + (anti-affinity) with, where co-located is defined as running + on a node whose value of the label with key <topologyKey> + matches that of any node on which a pod of the set of pods + is running + type: object + required: + - topologyKey + properties: + labelSelector: + description: A label query over a set of resources, in + this case pods. + type: object + properties: + matchExpressions: + description: matchExpressions is a list of label selector + requirements. The requirements are ANDed. + type: array + items: + description: A label selector requirement is a selector + that contains values, a key, and an operator that + relates the key and values. + type: object + required: + - key + - operator + properties: + key: + description: key is the label key that the selector + applies to. + type: string + operator: + description: operator represents a key's relationship + to a set of values. Valid operators are In, + NotIn, Exists and DoesNotExist. + type: string + values: + description: values is an array of string values. + If the operator is In or NotIn, the values + array must be non-empty. If the operator is + Exists or DoesNotExist, the values array must + be empty. This array is replaced during a + strategic merge patch. + type: array + items: + type: string + matchLabels: + description: matchLabels is a map of {key,value} pairs. + A single {key,value} in the matchLabels map is equivalent + to an element of matchExpressions, whose key field + is "key", the operator is "In", and the values array + contains only "value". The requirements are ANDed. 
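Pulling the affinity schema together, a sketch of the shape the spec.affinity field accepts: a soft preference for a hypothetical node pool plus a hard anti-affinity rule spreading away from pods labeled app=image-pruner (all labels illustrative):

package example

import (
	corev1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

var affinity = &corev1.Affinity{
	NodeAffinity: &corev1.NodeAffinity{
		// Soft preference: weight 1-100, summed per node across matching terms.
		PreferredDuringSchedulingIgnoredDuringExecution: []corev1.PreferredSchedulingTerm{{
			Weight: 50,
			Preference: corev1.NodeSelectorTerm{
				MatchExpressions: []corev1.NodeSelectorRequirement{{
					Key:      "example.com/pool", // hypothetical label
					Operator: corev1.NodeSelectorOpIn,
					Values:   []string{"batch"},
				}},
			},
		}},
	},
	PodAntiAffinity: &corev1.PodAntiAffinity{
		// Hard rule: never schedule onto a node already running a matching pod.
		RequiredDuringSchedulingIgnoredDuringExecution: []corev1.PodAffinityTerm{{
			LabelSelector: &metav1.LabelSelector{
				MatchLabels: map[string]string{"app": "image-pruner"},
			},
			TopologyKey: "kubernetes.io/hostname", // co-location domain: a single node
		}},
	},
}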
+ type: object + additionalProperties: + type: string + namespaces: + description: namespaces specifies which namespaces the + labelSelector applies to (matches against); null or + empty list means "this pod's namespace" + type: array + items: + type: string + topologyKey: + description: This pod should be co-located (affinity) + or not co-located (anti-affinity) with the pods matching + the labelSelector in the specified namespaces, where + co-located is defined as running on a node whose value + of the label with key topologyKey matches that of any + node on which any of the selected pods is running. Empty + topologyKey is not allowed. + type: string + failedJobsHistoryLimit: + description: failedJobsHistoryLimit specifies how many failed image + pruner jobs to retain. Defaults to 3 if not set. + type: integer + format: int32 + keepTagRevisions: + description: keepTagRevisions specifies the number of image revisions + for a tag in an image stream that will be preserved. Defaults to 5. + type: integer + keepYoungerThan: + description: keepYoungerThan specifies the minimum age of an image and + its referrers for it to be considered a candidate for pruning. Defaults + to 96h (96 hours). + type: integer + format: int64 + nodeSelector: + description: nodeSelector defines the node selection constraints for + the image pruner pod. + type: object + additionalProperties: + type: string + resources: + description: resources defines the resource requests and limits for + the image pruner pod. + type: object + properties: + limits: + description: 'Limits describes the maximum amount of compute resources + allowed. More info: https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/' + type: object + additionalProperties: + type: string + requests: + description: 'Requests describes the minimum amount of compute resources + required. If Requests is omitted for a container, it defaults + to Limits if that is explicitly specified, otherwise to an implementation-defined + value. More info: https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/' + type: object + additionalProperties: + type: string + schedule: + description: 'schedule specifies when to execute the job using standard + cronjob syntax: https://wikipedia.org/wiki/Cron. Defaults to `0 0 + * * *`.' + type: string + successfulJobsHistoryLimit: + description: successfulJobsHistoryLimit specifies how many successful + image pruner jobs to retain. Defaults to 3 if not set. + type: integer + format: int32 + suspend: + description: suspend specifies whether or not to suspend subsequent + executions of this cronjob. Defaults to false. + type: boolean + tolerations: + description: tolerations defines the node tolerations for the image + pruner pod. + type: array + items: + description: The pod this Toleration is attached to tolerates any + taint that matches the triple <key,value,effect> using the matching + operator <operator>. + type: object + properties: + effect: + description: Effect indicates the taint effect to match. Empty + means match all taint effects. When specified, allowed values + are NoSchedule, PreferNoSchedule and NoExecute. + type: string + key: + description: Key is the taint key that the toleration applies + to. Empty means match all taint keys. If the key is empty, operator + must be Exists; this combination means to match all values and + all keys. + type: string + operator: + description: Operator represents a key's relationship to the value. 
+ Valid operators are Exists and Equal. Defaults to Equal. Exists + is equivalent to wildcard for value, so that a pod can tolerate + all taints of a particular category. + type: string + tolerationSeconds: + description: TolerationSeconds represents the period of time the + toleration (which must be of effect NoExecute, otherwise this + field is ignored) tolerates the taint. By default, it is not + set, which means tolerate the taint forever (do not evict). + Zero and negative values will be treated as 0 (evict immediately) + by the system. + type: integer + format: int64 + value: + description: Value is the taint value the toleration matches to. + If the operator is Exists, the value should be empty, otherwise + just a regular string. + type: string + status: + description: ImagePrunerStatus reports image pruner operational status. + type: object + properties: + conditions: + description: conditions is a list of conditions and their status. + type: array + items: + description: OperatorCondition is just the standard condition fields. + type: object + properties: + lastTransitionTime: + type: string + format: date-time + message: + type: string + reason: + type: string + status: + type: string + type: + type: string + observedGeneration: + description: observedGeneration is the last generation change that has + been applied. + type: integer + format: int64 diff --git a/vendor/github.com/openshift/api/imageregistry/v1/doc.go b/vendor/github.com/openshift/api/imageregistry/v1/doc.go new file mode 100644 index 0000000000000000000000000000000000000000..32ad6f81414dfb160ee51db081823b7d889fefd4 --- /dev/null +++ b/vendor/github.com/openshift/api/imageregistry/v1/doc.go @@ -0,0 +1,3 @@ +// +k8s:deepcopy-gen=package +// +groupName=imageregistry.operator.openshift.io +package v1 diff --git a/vendor/github.com/openshift/api/imageregistry/v1/register.go b/vendor/github.com/openshift/api/imageregistry/v1/register.go new file mode 100644 index 0000000000000000000000000000000000000000..9b83e9dab767071488c4122dc8781592555d74b0 --- /dev/null +++ b/vendor/github.com/openshift/api/imageregistry/v1/register.go @@ -0,0 +1,43 @@ +package v1 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" +) + +const ( + version = "v1" + groupName = "imageregistry.operator.openshift.io" +) + +var ( + scheme = runtime.NewScheme() + SchemeBuilder = runtime.NewSchemeBuilder(addKnownTypes) + AddToScheme = SchemeBuilder.AddToScheme + // SchemeGroupVersion is the group version used to register these objects. + SchemeGroupVersion = schema.GroupVersion{Group: groupName, Version: version} + // Install is a function which adds this version to a scheme + Install = SchemeBuilder.AddToScheme +) + +func init() { + AddToScheme(scheme) +} + +// addKnownTypes adds the set of types defined in this package to the supplied scheme. 
+func addKnownTypes(scheme *runtime.Scheme) error { + scheme.AddKnownTypes(SchemeGroupVersion, + &Config{}, + &ConfigList{}, + &ImagePruner{}, + &ImagePrunerList{}, + ) + metav1.AddToGroupVersion(scheme, SchemeGroupVersion) + return nil +} + +// Resource takes an unqualified resource and returns a Group qualified GroupResource +func Resource(resource string) schema.GroupResource { + return SchemeGroupVersion.WithResource(resource).GroupResource() +} diff --git a/vendor/github.com/openshift/api/imageregistry/v1/types.go b/vendor/github.com/openshift/api/imageregistry/v1/types.go new file mode 100644 index 0000000000000000000000000000000000000000..a784b407108f94a33883b4d6463fc8cb9a9a4ddd --- /dev/null +++ b/vendor/github.com/openshift/api/imageregistry/v1/types.go @@ -0,0 +1,311 @@ +package v1 + +import ( + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + + operatorv1 "github.com/openshift/api/operator/v1" +) + +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// ConfigList is a slice of Config objects. +type ConfigList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata" protobuf:"bytes,1,opt,name=metadata"` + Items []Config `json:"items" protobuf:"bytes,2,rep,name=items"` +} + +// +genclient +// +genclient:nonNamespaced +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// Config is the configuration object for a registry instance managed by +// the registry operator +type Config struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata" protobuf:"bytes,1,opt,name=metadata"` + + Spec ImageRegistrySpec `json:"spec" protobuf:"bytes,2,opt,name=spec"` + // +optional + Status ImageRegistryStatus `json:"status,omitempty" protobuf:"bytes,3,opt,name=status"` +} + +// ImageRegistrySpec defines the specs for the running registry. +type ImageRegistrySpec struct { + // managementState indicates whether the registry instance represented + // by this config instance is under operator management or not. Valid + // values are Managed, Unmanaged, and Removed. + ManagementState operatorv1.ManagementState `json:"managementState" protobuf:"bytes,1,opt,name=managementState,casttype=github.com/openshift/api/operator/v1.ManagementState"` + // httpSecret is the value needed by the registry to secure uploads, generated by default. + // +optional + HTTPSecret string `json:"httpSecret,omitempty" protobuf:"bytes,2,opt,name=httpSecret"` + // proxy defines the proxy to be used when calling master api, upstream + // registries, etc. + // +optional + Proxy ImageRegistryConfigProxy `json:"proxy,omitempty" protobuf:"bytes,3,opt,name=proxy"` + // storage details for configuring registry storage, e.g. S3 bucket + // coordinates. + // +optional + Storage ImageRegistryConfigStorage `json:"storage,omitempty" protobuf:"bytes,4,opt,name=storage"` + // readOnly indicates whether the registry instance should reject attempts + // to push new images or delete existing ones. + // +optional + ReadOnly bool `json:"readOnly,omitempty" protobuf:"varint,5,opt,name=readOnly"` + // disableRedirect controls whether to route all data through the Registry, + // rather than redirecting to the backend. + // +optional + DisableRedirect bool `json:"disableRedirect,omitempty" protobuf:"varint,6,opt,name=disableRedirect"` + // requests controls how many parallel requests a given registry instance + // will handle before queuing additional requests. 
+ // +optional + Requests ImageRegistryConfigRequests `json:"requests,omitempty" protobuf:"bytes,7,opt,name=requests"` + // defaultRoute indicates whether an external facing route for the registry + // should be created using the default generated hostname. + // +optional + DefaultRoute bool `json:"defaultRoute,omitempty" protobuf:"varint,8,opt,name=defaultRoute"` + // routes defines additional external facing routes which should be + // created for the registry. + // +optional + Routes []ImageRegistryConfigRoute `json:"routes,omitempty" protobuf:"bytes,9,rep,name=routes"` + // replicas determines the number of registry instances to run. + Replicas int32 `json:"replicas" protobuf:"varint,10,opt,name=replicas"` + // logging determines the level of logging enabled in the registry. + LogLevel int64 `json:"logging" protobuf:"varint,11,opt,name=logging"` + // resources defines the resource requests+limits for the registry pod. + // +optional + Resources *corev1.ResourceRequirements `json:"resources,omitempty" protobuf:"bytes,12,opt,name=resources"` + // nodeSelector defines the node selection constraints for the registry + // pod. + // +optional + NodeSelector map[string]string `json:"nodeSelector,omitempty" protobuf:"bytes,13,rep,name=nodeSelector"` + // tolerations defines the tolerations for the registry pod. + // +optional + Tolerations []corev1.Toleration `json:"tolerations,omitempty" protobuf:"bytes,14,rep,name=tolerations"` + // rolloutStrategy defines rollout strategy for the image registry + // deployment. + // +optional + // +kubebuilder:validation:Pattern=`^(RollingUpdate|Recreate)$` + RolloutStrategy string `json:"rolloutStrategy,omitempty" protobuf:"bytes,15,opt,name=rolloutStrategy"` +} + +// ImageRegistryStatus reports image registry operational status. +type ImageRegistryStatus struct { + operatorv1.OperatorStatus `json:",inline" protobuf:"bytes,1,opt,name=operatorStatus"` + + // storageManaged is a boolean which denotes whether or not + // we created the registry storage medium (such as an + // S3 bucket). + StorageManaged bool `json:"storageManaged" protobuf:"varint,2,opt,name=storageManaged"` + // storage indicates the current applied storage configuration of the + // registry. + Storage ImageRegistryConfigStorage `json:"storage" protobuf:"bytes,3,opt,name=storage"` +} + +// ImageRegistryConfigProxy defines proxy configuration to be used by registry. +type ImageRegistryConfigProxy struct { + // http defines the proxy to be used by the image registry when + // accessing HTTP endpoints. + // +optional + HTTP string `json:"http,omitempty" protobuf:"bytes,1,opt,name=http"` + // https defines the proxy to be used by the image registry when + // accessing HTTPS endpoints. + // +optional + HTTPS string `json:"https,omitempty" protobuf:"bytes,2,opt,name=https"` + // noProxy defines a comma-separated list of host names that shouldn't + // go through any proxy. + // +optional + NoProxy string `json:"noProxy,omitempty" protobuf:"bytes,3,opt,name=noProxy"` +} + +// ImageRegistryConfigStorageS3CloudFront holds the configuration +// to use Amazon Cloudfront as the storage middleware in a registry. +// https://docs.docker.com/registry/configuration/#cloudfront +type ImageRegistryConfigStorageS3CloudFront struct { + // baseURL contains the SCHEME://HOST[/PATH] at which Cloudfront is served. + BaseURL string `json:"baseURL" protobuf:"bytes,1,opt,name=baseURL"` + // privateKey points to secret containing the private key, provided by AWS. 
+ PrivateKey corev1.SecretKeySelector `json:"privateKey" protobuf:"bytes,2,opt,name=privateKey"` + // keypairID is key pair ID provided by AWS. + KeypairID string `json:"keypairID" protobuf:"bytes,3,opt,name=keypairID"` + // duration is the duration of the Cloudfront session. + // +optional + Duration metav1.Duration `json:"duration,omitempty" protobuf:"bytes,4,opt,name=duration"` +} + +// ImageRegistryConfigStorageEmptyDir is an place holder to be used when +// when registry is leveraging ephemeral storage. +type ImageRegistryConfigStorageEmptyDir struct { +} + +// ImageRegistryConfigStorageS3 holds the information to configure +// the registry to use the AWS S3 service for backend storage +// https://docs.docker.com/registry/storage-drivers/s3/ +type ImageRegistryConfigStorageS3 struct { + // bucket is the bucket name in which you want to store the registry's + // data. + // Optional, will be generated if not provided. + // +optional + Bucket string `json:"bucket,omitempty" protobuf:"bytes,1,opt,name=bucket"` + // region is the AWS region in which your bucket exists. + // Optional, will be set based on the installed AWS Region. + // +optional + Region string `json:"region,omitempty" protobuf:"bytes,2,opt,name=region"` + // regionEndpoint is the endpoint for S3 compatible storage services. + // Optional, defaults based on the Region that is provided. + // +optional + RegionEndpoint string `json:"regionEndpoint,omitempty" protobuf:"bytes,3,opt,name=regionEndpoint"` + // encrypt specifies whether the registry stores the image in encrypted + // format or not. + // Optional, defaults to false. + // +optional + Encrypt bool `json:"encrypt,omitempty" protobuf:"varint,4,opt,name=encrypt"` + // keyID is the KMS key ID to use for encryption. + // Optional, Encrypt must be true, or this parameter is ignored. + // +optional + KeyID string `json:"keyID,omitempty" protobuf:"bytes,5,opt,name=keyID"` + // cloudFront configures Amazon Cloudfront as the storage middleware in a + // registry. + // +optional + CloudFront *ImageRegistryConfigStorageS3CloudFront `json:"cloudFront,omitempty" protobuf:"bytes,6,opt,name=cloudFront"` +} + +// ImageRegistryConfigStorageGCS holds GCS configuration. +type ImageRegistryConfigStorageGCS struct { + // bucket is the bucket name in which you want to store the registry's + // data. + // Optional, will be generated if not provided. + // +optional + Bucket string `json:"bucket,omitempty" protobuf:"bytes,1,opt,name=bucket"` + // region is the GCS location in which your bucket exists. + // Optional, will be set based on the installed GCS Region. + // +optional + Region string `json:"region,omitempty" protobuf:"bytes,2,opt,name=region"` + // projectID is the Project ID of the GCP project that this bucket should + // be associated with. + // +optional + ProjectID string `json:"projectID,omitempty" protobuf:"bytes,3,opt,name=projectID"` + // keyID is the KMS key ID to use for encryption. + // Optional, buckets are encrypted by default on GCP. + // This allows for the use of a custom encryption key. + // +optional + KeyID string `json:"keyID,omitempty" protobuf:"bytes,4,opt,name=keyID"` +} + +// ImageRegistryConfigStorageSwift holds the information to configure +// the registry to use the OpenStack Swift service for backend storage +// https://docs.docker.com/registry/storage-drivers/swift/ +type ImageRegistryConfigStorageSwift struct { + // authURL defines the URL for obtaining an authentication token. 
+	// +optional
+	AuthURL string `json:"authURL,omitempty" protobuf:"bytes,1,opt,name=authURL"`
+	// authVersion specifies the OpenStack Auth's version.
+	// +optional
+	AuthVersion string `json:"authVersion,omitempty" protobuf:"bytes,2,opt,name=authVersion"`
+	// container defines the name of Swift container where to store the
+	// registry's data.
+	// +optional
+	Container string `json:"container,omitempty" protobuf:"bytes,3,opt,name=container"`
+	// domain specifies Openstack's domain name for Identity v3 API.
+	// +optional
+	Domain string `json:"domain,omitempty" protobuf:"bytes,4,opt,name=domain"`
+	// domainID specifies Openstack's domain id for Identity v3 API.
+	// +optional
+	DomainID string `json:"domainID,omitempty" protobuf:"bytes,5,opt,name=domainID"`
+	// tenant defines Openstack tenant name to be used by registry.
+	// +optional
+	Tenant string `json:"tenant,omitempty" protobuf:"bytes,6,opt,name=tenant"`
+	// tenantID defines Openstack tenant id to be used by registry.
+	// +optional
+	TenantID string `json:"tenantID,omitempty" protobuf:"bytes,7,opt,name=tenantID"`
+	// regionName defines Openstack's region in which container exists.
+	// +optional
+	RegionName string `json:"regionName,omitempty" protobuf:"bytes,8,opt,name=regionName"`
+}
+
+// ImageRegistryConfigStoragePVC holds Persistent Volume Claims data to
+// be used by the registry.
+type ImageRegistryConfigStoragePVC struct {
+	// claim defines the Persistent Volume Claim's name to be used.
+	// +optional
+	Claim string `json:"claim,omitempty" protobuf:"bytes,1,opt,name=claim"`
+}
+
+// ImageRegistryConfigStorageAzure holds the information to configure
+// the registry to use Azure Blob Storage for backend storage.
+type ImageRegistryConfigStorageAzure struct {
+	// accountName defines the account to be used by the registry.
+	// +optional
+	AccountName string `json:"accountName,omitempty" protobuf:"bytes,1,opt,name=accountName"`
+	// container defines Azure's container to be used by registry.
+	// +optional
+	// +kubebuilder:validation:MaxLength=63
+	// +kubebuilder:validation:MinLength=3
+	// +kubebuilder:validation:Pattern=`^[0-9a-z]+(-[0-9a-z]+)*$`
+	Container string `json:"container,omitempty" protobuf:"bytes,2,opt,name=container"`
+}
+
+// ImageRegistryConfigStorage describes how the storage should be configured
+// for the image registry.
+type ImageRegistryConfigStorage struct {
+	// emptyDir represents ephemeral storage on the pod's host node.
+	// WARNING: this storage cannot be used with more than 1 replica and
+	// is not suitable for production use. When the pod is removed from a
+	// node for any reason, the data in the emptyDir is deleted forever.
+	// +optional
+	EmptyDir *ImageRegistryConfigStorageEmptyDir `json:"emptyDir,omitempty" protobuf:"bytes,1,opt,name=emptyDir"`
+	// s3 represents configuration that uses Amazon Simple Storage Service.
+	// +optional
+	S3 *ImageRegistryConfigStorageS3 `json:"s3,omitempty" protobuf:"bytes,2,opt,name=s3"`
+	// gcs represents configuration that uses Google Cloud Storage.
+	// +optional
+	GCS *ImageRegistryConfigStorageGCS `json:"gcs,omitempty" protobuf:"bytes,3,opt,name=gcs"`
+	// swift represents configuration that uses OpenStack Object Storage.
+	// +optional
+	Swift *ImageRegistryConfigStorageSwift `json:"swift,omitempty" protobuf:"bytes,4,opt,name=swift"`
+	// pvc represents configuration that uses a PersistentVolumeClaim.
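+	// A minimal sketch of selecting this backend (the claim name is
+	// hypothetical):
+	//
+	//	storage:
+	//	  pvc:
+	//	    claim: registry-storage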
+	// +optional
+	PVC *ImageRegistryConfigStoragePVC `json:"pvc,omitempty" protobuf:"bytes,5,opt,name=pvc"`
+	// azure represents configuration that uses Azure Blob Storage.
+	// +optional
+	Azure *ImageRegistryConfigStorageAzure `json:"azure,omitempty" protobuf:"bytes,6,opt,name=azure"`
+}
+
+// ImageRegistryConfigRequests defines registry limits on read and write requests.
+type ImageRegistryConfigRequests struct {
+	// read defines limits for image registry's reads.
+	// +optional
+	Read ImageRegistryConfigRequestsLimits `json:"read,omitempty" protobuf:"bytes,1,opt,name=read"`
+	// write defines limits for image registry's writes.
+	// +optional
+	Write ImageRegistryConfigRequestsLimits `json:"write,omitempty" protobuf:"bytes,2,opt,name=write"`
+}
+
+// ImageRegistryConfigRequestsLimits holds configuration on the max, enqueued
+// and waiting registry's API requests.
+type ImageRegistryConfigRequestsLimits struct {
+	// maxRunning sets the maximum in flight api requests to the registry.
+	// +optional
+	MaxRunning int `json:"maxRunning,omitempty" protobuf:"varint,1,opt,name=maxRunning"`
+	// maxInQueue sets the maximum queued api requests to the registry.
+	// +optional
+	MaxInQueue int `json:"maxInQueue,omitempty" protobuf:"varint,2,opt,name=maxInQueue"`
+	// maxWaitInQueue sets the maximum time a request can wait in the queue
+	// before being rejected.
+	// +optional
+	MaxWaitInQueue metav1.Duration `json:"maxWaitInQueue,omitempty" protobuf:"bytes,3,opt,name=maxWaitInQueue"`
+}
+
+// ImageRegistryConfigRoute holds information on external route access to image
+// registry.
+type ImageRegistryConfigRoute struct {
+	// name of the route to be created.
+	Name string `json:"name" protobuf:"bytes,1,opt,name=name"`
+	// hostname for the route.
+	// +optional
+	Hostname string `json:"hostname,omitempty" protobuf:"bytes,2,opt,name=hostname"`
+	// secretName points to secret containing the certificates to be used
+	// by the route.
+	// +optional
+	SecretName string `json:"secretName,omitempty" protobuf:"bytes,3,opt,name=secretName"`
+}
diff --git a/vendor/github.com/openshift/api/imageregistry/v1/types_imagepruner.go b/vendor/github.com/openshift/api/imageregistry/v1/types_imagepruner.go
new file mode 100644
index 0000000000000000000000000000000000000000..2d061b649f37cd757784bfbefe15bebe1857d958
--- /dev/null
+++ b/vendor/github.com/openshift/api/imageregistry/v1/types_imagepruner.go
@@ -0,0 +1,84 @@
+package v1
+
+import (
+	"time"
+
+	corev1 "k8s.io/api/core/v1"
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+
+	operatorv1 "github.com/openshift/api/operator/v1"
+)
+
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+
+// ImagePrunerList is a slice of ImagePruner objects.
+type ImagePrunerList struct {
+	metav1.TypeMeta `json:",inline"`
+	metav1.ListMeta `json:"metadata" protobuf:"bytes,1,opt,name=metadata"`
+	Items []ImagePruner `json:"items" protobuf:"bytes,2,rep,name=items"`
+}
+
+// +genclient
+// +genclient:nonNamespaced
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+
+// ImagePruner is the configuration object for an image registry pruner
+// managed by the registry operator.
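+//
+// A minimal manifest sketch (hand-written illustration; the object name
+// "cluster" is an assumption, and the spec fields are documented on
+// ImagePrunerSpec below):
+//
+//	apiVersion: imageregistry.operator.openshift.io/v1
+//	kind: ImagePruner
+//	metadata:
+//	  name: cluster
+//	spec:
+//	  schedule: "0 0 * * *"
+//	  keepTagRevisions: 3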
+type ImagePruner struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata" protobuf:"bytes,1,opt,name=metadata"` + + Spec ImagePrunerSpec `json:"spec" protobuf:"bytes,2,opt,name=spec"` + // +optional + Status ImagePrunerStatus `json:"status" protobuf:"bytes,3,opt,name=status"` +} + +// ImagePrunerSpec defines the specs for the running image pruner. +type ImagePrunerSpec struct { + // schedule specifies when to execute the job using standard cronjob syntax: https://wikipedia.org/wiki/Cron. + // Defaults to `0 0 * * *`. + // +optional + Schedule string `json:"schedule" protobuf:"bytes,1,opt,name=schedule"` + // suspend specifies whether or not to suspend subsequent executions of this cronjob. + // Defaults to false. + // +optional + Suspend *bool `json:"suspend,omitempty" protobuf:"bytes,2,opt,name=suspend"` + // keepTagRevisions specifies the number of image revisions for a tag in an image stream that will be preserved. + // Defaults to 5. + // +optional + KeepTagRevisions *int `json:"keepTagRevisions,omitempty" protobuf:"bytes,3,opt,name=keepTagRevisions"` + // keepYoungerThan specifies the minimum age of an image and its referrers for it to be considered a candidate for pruning. + // Defaults to 96h (96 hours). + // +optional + KeepYoungerThan *time.Duration `json:"keepYoungerThan,omitempty" protobuf:"varint,4,opt,name=keepYoungerThan,casttype=time.Duration"` + // resources defines the resource requests and limits for the image pruner pod. + // +optional + Resources *corev1.ResourceRequirements `json:"resources,omitempty" protobuf:"bytes,5,opt,name=resources"` + // affinity is a group of node affinity scheduling rules for the image pruner pod. + // +optional + Affinity *corev1.Affinity `json:"affinity,omitempty" protobuf:"bytes,6,opt,name=affinity"` + // nodeSelector defines the node selection constraints for the image pruner pod. + // +optional + NodeSelector map[string]string `json:"nodeSelector,omitempty" protobuf:"bytes,7,rep,name=nodeSelector"` + // tolerations defines the node tolerations for the image pruner pod. + // +optional + Tolerations []corev1.Toleration `json:"tolerations,omitempty" protobuf:"bytes,8,rep,name=tolerations"` + // successfulJobsHistoryLimit specifies how many successful image pruner jobs to retain. + // Defaults to 3 if not set. + // +optional + SuccessfulJobsHistoryLimit *int32 `json:"successfulJobsHistoryLimit,omitempty" protobuf:"bytes,1,opt,name=successfulJobsHistoryLimit"` + // failedJobsHistoryLimit specifies how many failed image pruner jobs to retain. + // Defaults to 3 if not set. + // +optional + FailedJobsHistoryLimit *int32 `json:"failedJobsHistoryLimit,omitempty" protobuf:"bytes,2,opt,name=failedJobsHistoryLimit"` +} + +// ImagePrunerStatus reports image pruner operational status. +type ImagePrunerStatus struct { + // observedGeneration is the last generation change that has been applied. + // +optional + ObservedGeneration int64 `json:"observedGeneration,omitempty" protobuf:"bytes,1,opt,name=observedGeneration"` + // conditions is a list of conditions and their status. 
+ // +optional + Conditions []operatorv1.OperatorCondition `json:"conditions,omitempty" protobuf:"bytes,2,rep,name=conditions"` +} diff --git a/vendor/github.com/openshift/api/imageregistry/v1/zz_generated.deepcopy.go b/vendor/github.com/openshift/api/imageregistry/v1/zz_generated.deepcopy.go new file mode 100644 index 0000000000000000000000000000000000000000..0d421faef7fcf968ec5b54508266b8d56786514c --- /dev/null +++ b/vendor/github.com/openshift/api/imageregistry/v1/zz_generated.deepcopy.go @@ -0,0 +1,516 @@ +// +build !ignore_autogenerated + +// Code generated by deepcopy-gen. DO NOT EDIT. + +package v1 + +import ( + time "time" + + operatorv1 "github.com/openshift/api/operator/v1" + corev1 "k8s.io/api/core/v1" + runtime "k8s.io/apimachinery/pkg/runtime" +) + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *Config) DeepCopyInto(out *Config) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Config. +func (in *Config) DeepCopy() *Config { + if in == nil { + return nil + } + out := new(Config) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *Config) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ConfigList) DeepCopyInto(out *ConfigList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]Config, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ConfigList. +func (in *ConfigList) DeepCopy() *ConfigList { + if in == nil { + return nil + } + out := new(ConfigList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *ConfigList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ImagePruner) DeepCopyInto(out *ImagePruner) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ImagePruner. +func (in *ImagePruner) DeepCopy() *ImagePruner { + if in == nil { + return nil + } + out := new(ImagePruner) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *ImagePruner) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ImagePrunerList) DeepCopyInto(out *ImagePrunerList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]ImagePruner, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ImagePrunerList. +func (in *ImagePrunerList) DeepCopy() *ImagePrunerList { + if in == nil { + return nil + } + out := new(ImagePrunerList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *ImagePrunerList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ImagePrunerSpec) DeepCopyInto(out *ImagePrunerSpec) { + *out = *in + if in.Suspend != nil { + in, out := &in.Suspend, &out.Suspend + *out = new(bool) + **out = **in + } + if in.KeepTagRevisions != nil { + in, out := &in.KeepTagRevisions, &out.KeepTagRevisions + *out = new(int) + **out = **in + } + if in.KeepYoungerThan != nil { + in, out := &in.KeepYoungerThan, &out.KeepYoungerThan + *out = new(time.Duration) + **out = **in + } + if in.Resources != nil { + in, out := &in.Resources, &out.Resources + *out = new(corev1.ResourceRequirements) + (*in).DeepCopyInto(*out) + } + if in.Affinity != nil { + in, out := &in.Affinity, &out.Affinity + *out = new(corev1.Affinity) + (*in).DeepCopyInto(*out) + } + if in.NodeSelector != nil { + in, out := &in.NodeSelector, &out.NodeSelector + *out = make(map[string]string, len(*in)) + for key, val := range *in { + (*out)[key] = val + } + } + if in.Tolerations != nil { + in, out := &in.Tolerations, &out.Tolerations + *out = make([]corev1.Toleration, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.SuccessfulJobsHistoryLimit != nil { + in, out := &in.SuccessfulJobsHistoryLimit, &out.SuccessfulJobsHistoryLimit + *out = new(int32) + **out = **in + } + if in.FailedJobsHistoryLimit != nil { + in, out := &in.FailedJobsHistoryLimit, &out.FailedJobsHistoryLimit + *out = new(int32) + **out = **in + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ImagePrunerSpec. +func (in *ImagePrunerSpec) DeepCopy() *ImagePrunerSpec { + if in == nil { + return nil + } + out := new(ImagePrunerSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ImagePrunerStatus) DeepCopyInto(out *ImagePrunerStatus) { + *out = *in + if in.Conditions != nil { + in, out := &in.Conditions, &out.Conditions + *out = make([]operatorv1.OperatorCondition, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ImagePrunerStatus. +func (in *ImagePrunerStatus) DeepCopy() *ImagePrunerStatus { + if in == nil { + return nil + } + out := new(ImagePrunerStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ImageRegistryConfigProxy) DeepCopyInto(out *ImageRegistryConfigProxy) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ImageRegistryConfigProxy. +func (in *ImageRegistryConfigProxy) DeepCopy() *ImageRegistryConfigProxy { + if in == nil { + return nil + } + out := new(ImageRegistryConfigProxy) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ImageRegistryConfigRequests) DeepCopyInto(out *ImageRegistryConfigRequests) { + *out = *in + out.Read = in.Read + out.Write = in.Write + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ImageRegistryConfigRequests. +func (in *ImageRegistryConfigRequests) DeepCopy() *ImageRegistryConfigRequests { + if in == nil { + return nil + } + out := new(ImageRegistryConfigRequests) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ImageRegistryConfigRequestsLimits) DeepCopyInto(out *ImageRegistryConfigRequestsLimits) { + *out = *in + out.MaxWaitInQueue = in.MaxWaitInQueue + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ImageRegistryConfigRequestsLimits. +func (in *ImageRegistryConfigRequestsLimits) DeepCopy() *ImageRegistryConfigRequestsLimits { + if in == nil { + return nil + } + out := new(ImageRegistryConfigRequestsLimits) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ImageRegistryConfigRoute) DeepCopyInto(out *ImageRegistryConfigRoute) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ImageRegistryConfigRoute. +func (in *ImageRegistryConfigRoute) DeepCopy() *ImageRegistryConfigRoute { + if in == nil { + return nil + } + out := new(ImageRegistryConfigRoute) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ImageRegistryConfigStorage) DeepCopyInto(out *ImageRegistryConfigStorage) { + *out = *in + if in.EmptyDir != nil { + in, out := &in.EmptyDir, &out.EmptyDir + *out = new(ImageRegistryConfigStorageEmptyDir) + **out = **in + } + if in.S3 != nil { + in, out := &in.S3, &out.S3 + *out = new(ImageRegistryConfigStorageS3) + (*in).DeepCopyInto(*out) + } + if in.GCS != nil { + in, out := &in.GCS, &out.GCS + *out = new(ImageRegistryConfigStorageGCS) + **out = **in + } + if in.Swift != nil { + in, out := &in.Swift, &out.Swift + *out = new(ImageRegistryConfigStorageSwift) + **out = **in + } + if in.PVC != nil { + in, out := &in.PVC, &out.PVC + *out = new(ImageRegistryConfigStoragePVC) + **out = **in + } + if in.Azure != nil { + in, out := &in.Azure, &out.Azure + *out = new(ImageRegistryConfigStorageAzure) + **out = **in + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ImageRegistryConfigStorage. +func (in *ImageRegistryConfigStorage) DeepCopy() *ImageRegistryConfigStorage { + if in == nil { + return nil + } + out := new(ImageRegistryConfigStorage) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. 
in must be non-nil. +func (in *ImageRegistryConfigStorageAzure) DeepCopyInto(out *ImageRegistryConfigStorageAzure) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ImageRegistryConfigStorageAzure. +func (in *ImageRegistryConfigStorageAzure) DeepCopy() *ImageRegistryConfigStorageAzure { + if in == nil { + return nil + } + out := new(ImageRegistryConfigStorageAzure) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ImageRegistryConfigStorageEmptyDir) DeepCopyInto(out *ImageRegistryConfigStorageEmptyDir) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ImageRegistryConfigStorageEmptyDir. +func (in *ImageRegistryConfigStorageEmptyDir) DeepCopy() *ImageRegistryConfigStorageEmptyDir { + if in == nil { + return nil + } + out := new(ImageRegistryConfigStorageEmptyDir) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ImageRegistryConfigStorageGCS) DeepCopyInto(out *ImageRegistryConfigStorageGCS) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ImageRegistryConfigStorageGCS. +func (in *ImageRegistryConfigStorageGCS) DeepCopy() *ImageRegistryConfigStorageGCS { + if in == nil { + return nil + } + out := new(ImageRegistryConfigStorageGCS) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ImageRegistryConfigStoragePVC) DeepCopyInto(out *ImageRegistryConfigStoragePVC) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ImageRegistryConfigStoragePVC. +func (in *ImageRegistryConfigStoragePVC) DeepCopy() *ImageRegistryConfigStoragePVC { + if in == nil { + return nil + } + out := new(ImageRegistryConfigStoragePVC) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ImageRegistryConfigStorageS3) DeepCopyInto(out *ImageRegistryConfigStorageS3) { + *out = *in + if in.CloudFront != nil { + in, out := &in.CloudFront, &out.CloudFront + *out = new(ImageRegistryConfigStorageS3CloudFront) + (*in).DeepCopyInto(*out) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ImageRegistryConfigStorageS3. +func (in *ImageRegistryConfigStorageS3) DeepCopy() *ImageRegistryConfigStorageS3 { + if in == nil { + return nil + } + out := new(ImageRegistryConfigStorageS3) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ImageRegistryConfigStorageS3CloudFront) DeepCopyInto(out *ImageRegistryConfigStorageS3CloudFront) { + *out = *in + in.PrivateKey.DeepCopyInto(&out.PrivateKey) + out.Duration = in.Duration + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ImageRegistryConfigStorageS3CloudFront. 
+func (in *ImageRegistryConfigStorageS3CloudFront) DeepCopy() *ImageRegistryConfigStorageS3CloudFront { + if in == nil { + return nil + } + out := new(ImageRegistryConfigStorageS3CloudFront) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ImageRegistryConfigStorageSwift) DeepCopyInto(out *ImageRegistryConfigStorageSwift) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ImageRegistryConfigStorageSwift. +func (in *ImageRegistryConfigStorageSwift) DeepCopy() *ImageRegistryConfigStorageSwift { + if in == nil { + return nil + } + out := new(ImageRegistryConfigStorageSwift) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ImageRegistrySpec) DeepCopyInto(out *ImageRegistrySpec) { + *out = *in + out.Proxy = in.Proxy + in.Storage.DeepCopyInto(&out.Storage) + out.Requests = in.Requests + if in.Routes != nil { + in, out := &in.Routes, &out.Routes + *out = make([]ImageRegistryConfigRoute, len(*in)) + copy(*out, *in) + } + if in.Resources != nil { + in, out := &in.Resources, &out.Resources + *out = new(corev1.ResourceRequirements) + (*in).DeepCopyInto(*out) + } + if in.NodeSelector != nil { + in, out := &in.NodeSelector, &out.NodeSelector + *out = make(map[string]string, len(*in)) + for key, val := range *in { + (*out)[key] = val + } + } + if in.Tolerations != nil { + in, out := &in.Tolerations, &out.Tolerations + *out = make([]corev1.Toleration, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ImageRegistrySpec. +func (in *ImageRegistrySpec) DeepCopy() *ImageRegistrySpec { + if in == nil { + return nil + } + out := new(ImageRegistrySpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ImageRegistryStatus) DeepCopyInto(out *ImageRegistryStatus) { + *out = *in + in.OperatorStatus.DeepCopyInto(&out.OperatorStatus) + in.Storage.DeepCopyInto(&out.Storage) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ImageRegistryStatus. +func (in *ImageRegistryStatus) DeepCopy() *ImageRegistryStatus { + if in == nil { + return nil + } + out := new(ImageRegistryStatus) + in.DeepCopyInto(out) + return out +} diff --git a/vendor/github.com/openshift/api/imageregistry/v1/zz_generated.swagger_doc_generated.go b/vendor/github.com/openshift/api/imageregistry/v1/zz_generated.swagger_doc_generated.go new file mode 100644 index 0000000000000000000000000000000000000000..9f453842fdccd6f524c4c246127c6957f79eeaf5 --- /dev/null +++ b/vendor/github.com/openshift/api/imageregistry/v1/zz_generated.swagger_doc_generated.go @@ -0,0 +1,245 @@ +package v1 + +// This file contains a collection of methods that can be used from go-restful to +// generate Swagger API documentation for its models. Please read this PR for more +// information on the implementation: https://github.com/emicklei/go-restful/pull/215 +// +// TODOs are ignored from the parser (e.g. TODO(andronat):... || TODO:...) if and only if +// they are on one line! For multiple line or blocks that you want to ignore use ---. +// Any context after a --- is ignored. 
+//
+// Those methods can be generated by using hack/update-swagger-docs.sh
+
+// AUTO-GENERATED FUNCTIONS START HERE
+var map_Config = map[string]string{
+	"": "Config is the configuration object for a registry instance managed by the registry operator",
+}
+
+func (Config) SwaggerDoc() map[string]string {
+	return map_Config
+}
+
+var map_ConfigList = map[string]string{
+	"": "ConfigList is a slice of Config objects.",
+}
+
+func (ConfigList) SwaggerDoc() map[string]string {
+	return map_ConfigList
+}
+
+var map_ImageRegistryConfigProxy = map[string]string{
+	"": "ImageRegistryConfigProxy defines proxy configuration to be used by registry.",
+	"http": "http defines the proxy to be used by the image registry when accessing HTTP endpoints.",
+	"https": "https defines the proxy to be used by the image registry when accessing HTTPS endpoints.",
+	"noProxy": "noProxy defines a comma-separated list of host names that shouldn't go through any proxy.",
+}
+
+func (ImageRegistryConfigProxy) SwaggerDoc() map[string]string {
+	return map_ImageRegistryConfigProxy
+}
+
+var map_ImageRegistryConfigRequests = map[string]string{
+	"": "ImageRegistryConfigRequests defines registry limits on read and write requests.",
+	"read": "read defines limits for image registry's reads.",
+	"write": "write defines limits for image registry's writes.",
+}
+
+func (ImageRegistryConfigRequests) SwaggerDoc() map[string]string {
+	return map_ImageRegistryConfigRequests
+}
+
+var map_ImageRegistryConfigRequestsLimits = map[string]string{
+	"": "ImageRegistryConfigRequestsLimits holds configuration on the max, enqueued and waiting registry's API requests.",
+	"maxRunning": "maxRunning sets the maximum in flight api requests to the registry.",
+	"maxInQueue": "maxInQueue sets the maximum queued api requests to the registry.",
+	"maxWaitInQueue": "maxWaitInQueue sets the maximum time a request can wait in the queue before being rejected.",
+}
+
+func (ImageRegistryConfigRequestsLimits) SwaggerDoc() map[string]string {
+	return map_ImageRegistryConfigRequestsLimits
+}
+
+var map_ImageRegistryConfigRoute = map[string]string{
+	"": "ImageRegistryConfigRoute holds information on external route access to image registry.",
+	"name": "name of the route to be created.",
+	"hostname": "hostname for the route.",
+	"secretName": "secretName points to secret containing the certificates to be used by the route.",
+}
+
+func (ImageRegistryConfigRoute) SwaggerDoc() map[string]string {
+	return map_ImageRegistryConfigRoute
+}
+
+var map_ImageRegistryConfigStorage = map[string]string{
+	"": "ImageRegistryConfigStorage describes how the storage should be configured for the image registry.",
+	"emptyDir": "emptyDir represents ephemeral storage on the pod's host node. WARNING: this storage cannot be used with more than 1 replica and is not suitable for production use. When the pod is removed from a node for any reason, the data in the emptyDir is deleted forever.",
+	"s3": "s3 represents configuration that uses Amazon Simple Storage Service.",
+	"gcs": "gcs represents configuration that uses Google Cloud Storage.",
+	"swift": "swift represents configuration that uses OpenStack Object Storage.",
+	"pvc": "pvc represents configuration that uses a PersistentVolumeClaim.",
+	"azure": "azure represents configuration that uses Azure Blob Storage.",
+}
+
+func (ImageRegistryConfigStorage) SwaggerDoc() map[string]string {
+	return map_ImageRegistryConfigStorage
+}
+
+var map_ImageRegistryConfigStorageAzure = map[string]string{
+	"": "ImageRegistryConfigStorageAzure holds the information to configure the registry to use Azure Blob Storage for backend storage.",
+	"accountName": "accountName defines the account to be used by the registry.",
+	"container": "container defines Azure's container to be used by registry.",
+}
+
+func (ImageRegistryConfigStorageAzure) SwaggerDoc() map[string]string {
+	return map_ImageRegistryConfigStorageAzure
+}
+
+var map_ImageRegistryConfigStorageEmptyDir = map[string]string{
+	"": "ImageRegistryConfigStorageEmptyDir is a placeholder to be used when the registry is leveraging ephemeral storage.",
+}
+
+func (ImageRegistryConfigStorageEmptyDir) SwaggerDoc() map[string]string {
+	return map_ImageRegistryConfigStorageEmptyDir
+}
+
+var map_ImageRegistryConfigStorageGCS = map[string]string{
+	"": "ImageRegistryConfigStorageGCS holds GCS configuration.",
+	"bucket": "bucket is the bucket name in which you want to store the registry's data. Optional, will be generated if not provided.",
+	"region": "region is the GCS location in which your bucket exists. Optional, will be set based on the installed GCS Region.",
+	"projectID": "projectID is the Project ID of the GCP project that this bucket should be associated with.",
+	"keyID": "keyID is the KMS key ID to use for encryption. Optional, buckets are encrypted by default on GCP. This allows for the use of a custom encryption key.",
+}
+
+func (ImageRegistryConfigStorageGCS) SwaggerDoc() map[string]string {
+	return map_ImageRegistryConfigStorageGCS
+}
+
+var map_ImageRegistryConfigStoragePVC = map[string]string{
+	"": "ImageRegistryConfigStoragePVC holds Persistent Volume Claims data to be used by the registry.",
+	"claim": "claim defines the Persistent Volume Claim's name to be used.",
+}
+
+func (ImageRegistryConfigStoragePVC) SwaggerDoc() map[string]string {
+	return map_ImageRegistryConfigStoragePVC
+}
+
+var map_ImageRegistryConfigStorageS3 = map[string]string{
+	"": "ImageRegistryConfigStorageS3 holds the information to configure the registry to use the AWS S3 service for backend storage https://docs.docker.com/registry/storage-drivers/s3/",
+	"bucket": "bucket is the bucket name in which you want to store the registry's data. Optional, will be generated if not provided.",
+	"region": "region is the AWS region in which your bucket exists. Optional, will be set based on the installed AWS Region.",
+	"regionEndpoint": "regionEndpoint is the endpoint for S3 compatible storage services. Optional, defaults based on the Region that is provided.",
+	"encrypt": "encrypt specifies whether the registry stores the image in encrypted format or not. Optional, defaults to false.",
+	"keyID": "keyID is the KMS key ID to use for encryption. Optional, Encrypt must be true, or this parameter is ignored.",
+	"cloudFront": "cloudFront configures Amazon Cloudfront as the storage middleware in a registry.",
+}
+
+func (ImageRegistryConfigStorageS3) SwaggerDoc() map[string]string {
+	return map_ImageRegistryConfigStorageS3
+}
+
+var map_ImageRegistryConfigStorageS3CloudFront = map[string]string{
+	"": "ImageRegistryConfigStorageS3CloudFront holds the configuration to use Amazon Cloudfront as the storage middleware in a registry. https://docs.docker.com/registry/configuration/#cloudfront",
+	"baseURL": "baseURL contains the SCHEME://HOST[/PATH] at which Cloudfront is served.",
+	"privateKey": "privateKey points to secret containing the private key, provided by AWS.",
+	"keypairID": "keypairID is the key pair ID provided by AWS.",
+	"duration": "duration is the duration of the Cloudfront session.",
+}
+
+func (ImageRegistryConfigStorageS3CloudFront) SwaggerDoc() map[string]string {
+	return map_ImageRegistryConfigStorageS3CloudFront
+}
+
+var map_ImageRegistryConfigStorageSwift = map[string]string{
+	"": "ImageRegistryConfigStorageSwift holds the information to configure the registry to use the OpenStack Swift service for backend storage https://docs.docker.com/registry/storage-drivers/swift/",
+	"authURL": "authURL defines the URL for obtaining an authentication token.",
+	"authVersion": "authVersion specifies the OpenStack Auth's version.",
+	"container": "container defines the name of Swift container where to store the registry's data.",
+	"domain": "domain specifies Openstack's domain name for Identity v3 API.",
+	"domainID": "domainID specifies Openstack's domain id for Identity v3 API.",
+	"tenant": "tenant defines Openstack tenant name to be used by registry.",
+	"tenantID": "tenantID defines Openstack tenant id to be used by registry.",
+	"regionName": "regionName defines Openstack's region in which container exists.",
+}
+
+func (ImageRegistryConfigStorageSwift) SwaggerDoc() map[string]string {
+	return map_ImageRegistryConfigStorageSwift
+}
+
+var map_ImageRegistrySpec = map[string]string{
+	"": "ImageRegistrySpec defines the specs for the running registry.",
+	"managementState": "managementState indicates whether the registry instance represented by this config instance is under operator management or not. Valid values are Managed, Unmanaged, and Removed.",
+	"httpSecret": "httpSecret is the value needed by the registry to secure uploads, generated by default.",
+	"proxy": "proxy defines the proxy to be used when calling master api, upstream registries, etc.",
+	"storage": "storage details for configuring registry storage, e.g.
S3 bucket coordinates.", + "readOnly": "readOnly indicates whether the registry instance should reject attempts to push new images or delete existing ones.", + "disableRedirect": "disableRedirect controls whether to route all data through the Registry, rather than redirecting to the backend.", + "requests": "requests controls how many parallel requests a given registry instance will handle before queuing additional requests.", + "defaultRoute": "defaultRoute indicates whether an external facing route for the registry should be created using the default generated hostname.", + "routes": "routes defines additional external facing routes which should be created for the registry.", + "replicas": "replicas determines the number of registry instances to run.", + "logging": "logging determines the level of logging enabled in the registry.", + "resources": "resources defines the resource requests+limits for the registry pod.", + "nodeSelector": "nodeSelector defines the node selection constraints for the registry pod.", + "tolerations": "tolerations defines the tolerations for the registry pod.", + "rolloutStrategy": "rolloutStrategy defines rollout strategy for the image registry deployment.", +} + +func (ImageRegistrySpec) SwaggerDoc() map[string]string { + return map_ImageRegistrySpec +} + +var map_ImageRegistryStatus = map[string]string{ + "": "ImageRegistryStatus reports image registry operational status.", + "storageManaged": "storageManaged is a boolean which denotes whether or not we created the registry storage medium (such as an S3 bucket).", + "storage": "storage indicates the current applied storage configuration of the registry.", +} + +func (ImageRegistryStatus) SwaggerDoc() map[string]string { + return map_ImageRegistryStatus +} + +var map_ImagePruner = map[string]string{ + "": "ImagePruner is the configuration object for an image registry pruner managed by the registry operator.", +} + +func (ImagePruner) SwaggerDoc() map[string]string { + return map_ImagePruner +} + +var map_ImagePrunerList = map[string]string{ + "": "ImagePrunerList is a slice of ImagePruner objects.", +} + +func (ImagePrunerList) SwaggerDoc() map[string]string { + return map_ImagePrunerList +} + +var map_ImagePrunerSpec = map[string]string{ + "": "ImagePrunerSpec defines the specs for the running image pruner.", + "schedule": "schedule specifies when to execute the job using standard cronjob syntax: https://wikipedia.org/wiki/Cron. Defaults to `0 0 * * *`.", + "suspend": "suspend specifies whether or not to suspend subsequent executions of this cronjob. Defaults to false.", + "keepTagRevisions": "keepTagRevisions specifies the number of image revisions for a tag in an image stream that will be preserved. Defaults to 5.", + "keepYoungerThan": "keepYoungerThan specifies the minimum age of an image and its referrers for it to be considered a candidate for pruning. Defaults to 96h (96 hours).", + "resources": "resources defines the resource requests and limits for the image pruner pod.", + "affinity": "affinity is a group of node affinity scheduling rules for the image pruner pod.", + "nodeSelector": "nodeSelector defines the node selection constraints for the image pruner pod.", + "tolerations": "tolerations defines the node tolerations for the image pruner pod.", + "successfulJobsHistoryLimit": "successfulJobsHistoryLimit specifies how many successful image pruner jobs to retain. Defaults to 3 if not set.", + "failedJobsHistoryLimit": "failedJobsHistoryLimit specifies how many failed image pruner jobs to retain. 
Defaults to 3 if not set.", +} + +func (ImagePrunerSpec) SwaggerDoc() map[string]string { + return map_ImagePrunerSpec +} + +var map_ImagePrunerStatus = map[string]string{ + "": "ImagePrunerStatus reports image pruner operational status.", + "observedGeneration": "observedGeneration is the last generation change that has been applied.", + "conditions": "conditions is a list of conditions and their status.", +} + +func (ImagePrunerStatus) SwaggerDoc() map[string]string { + return map_ImagePrunerStatus +} + +// AUTO-GENERATED FUNCTIONS END HERE diff --git a/vendor/github.com/openshift/api/install.go b/vendor/github.com/openshift/api/install.go new file mode 100644 index 0000000000000000000000000000000000000000..60af26b05f4e6cf8bed4ce3a9f3a28b683c3774f --- /dev/null +++ b/vendor/github.com/openshift/api/install.go @@ -0,0 +1,121 @@ +package api + +import ( + kadmissionv1beta1 "k8s.io/api/admission/v1beta1" + kadmissionregistrationv1beta1 "k8s.io/api/admissionregistration/v1beta1" + kappsv1 "k8s.io/api/apps/v1" + kappsv1beta1 "k8s.io/api/apps/v1beta1" + kappsv1beta2 "k8s.io/api/apps/v1beta2" + kauthenticationv1 "k8s.io/api/authentication/v1" + kauthenticationv1beta1 "k8s.io/api/authentication/v1beta1" + kauthorizationv1 "k8s.io/api/authorization/v1" + kauthorizationv1beta1 "k8s.io/api/authorization/v1beta1" + kautoscalingv1 "k8s.io/api/autoscaling/v1" + kautoscalingv2beta1 "k8s.io/api/autoscaling/v2beta1" + kbatchv1 "k8s.io/api/batch/v1" + kbatchv1beta1 "k8s.io/api/batch/v1beta1" + kbatchv2alpha1 "k8s.io/api/batch/v2alpha1" + kcertificatesv1beta1 "k8s.io/api/certificates/v1beta1" + kcorev1 "k8s.io/api/core/v1" + keventsv1beta1 "k8s.io/api/events/v1beta1" + kextensionsv1beta1 "k8s.io/api/extensions/v1beta1" + kimagepolicyv1alpha1 "k8s.io/api/imagepolicy/v1alpha1" + knetworkingv1 "k8s.io/api/networking/v1" + kpolicyv1beta1 "k8s.io/api/policy/v1beta1" + krbacv1 "k8s.io/api/rbac/v1" + krbacv1alpha1 "k8s.io/api/rbac/v1alpha1" + krbacv1beta1 "k8s.io/api/rbac/v1beta1" + kschedulingv1alpha1 "k8s.io/api/scheduling/v1alpha1" + kschedulingv1beta1 "k8s.io/api/scheduling/v1beta1" + ksettingsv1alpha1 "k8s.io/api/settings/v1alpha1" + kstoragev1 "k8s.io/api/storage/v1" + kstoragev1alpha1 "k8s.io/api/storage/v1alpha1" + kstoragev1beta1 "k8s.io/api/storage/v1beta1" + "k8s.io/apimachinery/pkg/runtime" + + "github.com/openshift/api/apps" + "github.com/openshift/api/authorization" + "github.com/openshift/api/build" + "github.com/openshift/api/config" + "github.com/openshift/api/image" + "github.com/openshift/api/imageregistry" + "github.com/openshift/api/kubecontrolplane" + "github.com/openshift/api/network" + "github.com/openshift/api/oauth" + "github.com/openshift/api/openshiftcontrolplane" + "github.com/openshift/api/operator" + "github.com/openshift/api/osin" + "github.com/openshift/api/project" + "github.com/openshift/api/quota" + "github.com/openshift/api/route" + "github.com/openshift/api/samples" + "github.com/openshift/api/security" + "github.com/openshift/api/servicecertsigner" + "github.com/openshift/api/template" + "github.com/openshift/api/user" + + // just make sure this compiles. 
Don't add it to a scheme + _ "github.com/openshift/api/legacyconfig/v1" +) + +var ( + schemeBuilder = runtime.NewSchemeBuilder( + apps.Install, + authorization.Install, + build.Install, + config.Install, + image.Install, + imageregistry.Install, + kubecontrolplane.Install, + network.Install, + oauth.Install, + openshiftcontrolplane.Install, + operator.Install, + osin.Install, + project.Install, + quota.Install, + route.Install, + samples.Install, + security.Install, + servicecertsigner.Install, + template.Install, + user.Install, + ) + // Install is a function which adds every version of every openshift group to a scheme + Install = schemeBuilder.AddToScheme + + kubeSchemeBuilder = runtime.NewSchemeBuilder( + kadmissionv1beta1.AddToScheme, + kadmissionregistrationv1beta1.AddToScheme, + kappsv1.AddToScheme, + kappsv1beta1.AddToScheme, + kappsv1beta2.AddToScheme, + kauthenticationv1.AddToScheme, + kauthenticationv1beta1.AddToScheme, + kauthorizationv1.AddToScheme, + kauthorizationv1beta1.AddToScheme, + kautoscalingv1.AddToScheme, + kautoscalingv2beta1.AddToScheme, + kbatchv1.AddToScheme, + kbatchv1beta1.AddToScheme, + kbatchv2alpha1.AddToScheme, + kcertificatesv1beta1.AddToScheme, + kcorev1.AddToScheme, + keventsv1beta1.AddToScheme, + kextensionsv1beta1.AddToScheme, + kimagepolicyv1alpha1.AddToScheme, + knetworkingv1.AddToScheme, + kpolicyv1beta1.AddToScheme, + krbacv1.AddToScheme, + krbacv1beta1.AddToScheme, + krbacv1alpha1.AddToScheme, + kschedulingv1alpha1.AddToScheme, + kschedulingv1beta1.AddToScheme, + ksettingsv1alpha1.AddToScheme, + kstoragev1.AddToScheme, + kstoragev1beta1.AddToScheme, + kstoragev1alpha1.AddToScheme, + ) + // InstallKube is a way to install all the external k8s.io/api types + InstallKube = kubeSchemeBuilder.AddToScheme +) diff --git a/vendor/github.com/openshift/api/kubecontrolplane/install.go b/vendor/github.com/openshift/api/kubecontrolplane/install.go new file mode 100644 index 0000000000000000000000000000000000000000..c34b777235cfca28bc93b997e1e2cf4e8fe716fa --- /dev/null +++ b/vendor/github.com/openshift/api/kubecontrolplane/install.go @@ -0,0 +1,26 @@ +package kubecontrolplane + +import ( + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" + + kubecontrolplanev1 "github.com/openshift/api/kubecontrolplane/v1" +) + +const ( + GroupName = "kubecontrolplane.config.openshift.io" +) + +var ( + schemeBuilder = runtime.NewSchemeBuilder(kubecontrolplanev1.Install) + // Install is a function which adds every version of this group to a scheme + Install = schemeBuilder.AddToScheme +) + +func Resource(resource string) schema.GroupResource { + return schema.GroupResource{Group: GroupName, Resource: resource} +} + +func Kind(kind string) schema.GroupKind { + return schema.GroupKind{Group: GroupName, Kind: kind} +} diff --git a/vendor/github.com/openshift/api/kubecontrolplane/v1/doc.go b/vendor/github.com/openshift/api/kubecontrolplane/v1/doc.go new file mode 100644 index 0000000000000000000000000000000000000000..d8872a61329bf5e80b6deef9e1bab0c762cf18a6 --- /dev/null +++ b/vendor/github.com/openshift/api/kubecontrolplane/v1/doc.go @@ -0,0 +1,7 @@ +// +k8s:deepcopy-gen=package,register +// +k8s:defaulter-gen=TypeMeta +// +k8s:openapi-gen=true + +// +groupName=kubecontrolplane.config.openshift.io +// Package v1 is the v1 version of the API. 
+package v1 diff --git a/vendor/github.com/openshift/api/kubecontrolplane/v1/register.go b/vendor/github.com/openshift/api/kubecontrolplane/v1/register.go new file mode 100644 index 0000000000000000000000000000000000000000..f8abc8ad8ce8f8096021ea26b23b7219ae69d44d --- /dev/null +++ b/vendor/github.com/openshift/api/kubecontrolplane/v1/register.go @@ -0,0 +1,38 @@ +package v1 + +import ( + configv1 "github.com/openshift/api/config/v1" + osinv1 "github.com/openshift/api/osin/v1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" +) + +var ( + GroupName = "kubecontrolplane.config.openshift.io" + GroupVersion = schema.GroupVersion{Group: GroupName, Version: "v1"} + schemeBuilder = runtime.NewSchemeBuilder(addKnownTypes, osinv1.Install, configv1.Install) + // Install is a function which adds this version to a scheme + Install = schemeBuilder.AddToScheme + + // SchemeGroupVersion generated code relies on this name + // Deprecated + SchemeGroupVersion = GroupVersion + // AddToScheme exists solely to keep the old generators creating valid code + // DEPRECATED + AddToScheme = schemeBuilder.AddToScheme +) + +// Resource generated code relies on this being here, but it logically belongs to the group +// DEPRECATED +func Resource(resource string) schema.GroupResource { + return schema.GroupResource{Group: GroupName, Resource: resource} +} + +// Adds the list of known types to api.Scheme. +func addKnownTypes(scheme *runtime.Scheme) error { + scheme.AddKnownTypes(GroupVersion, + &KubeAPIServerConfig{}, + &KubeControllerManagerConfig{}, + ) + return nil +} diff --git a/vendor/github.com/openshift/api/kubecontrolplane/v1/types.go b/vendor/github.com/openshift/api/kubecontrolplane/v1/types.go new file mode 100644 index 0000000000000000000000000000000000000000..97c1d5b7d0599c1c02e622536d67eab424c801a9 --- /dev/null +++ b/vendor/github.com/openshift/api/kubecontrolplane/v1/types.go @@ -0,0 +1,213 @@ +package v1 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + + "fmt" + + configv1 "github.com/openshift/api/config/v1" + osinv1 "github.com/openshift/api/osin/v1" +) + +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +type KubeAPIServerConfig struct { + metav1.TypeMeta `json:",inline"` + + // provides the standard apiserver configuration + configv1.GenericAPIServerConfig `json:",inline"` + + // authConfig configures authentication options in addition to the standard + // oauth token and client certificate authenticators + AuthConfig MasterAuthConfig `json:"authConfig"` + + // aggregatorConfig has options for configuring the aggregator component of the API server. + AggregatorConfig AggregatorConfig `json:"aggregatorConfig"` + + // kubeletClientInfo contains information about how to connect to kubelets + KubeletClientInfo KubeletConnectionInfo `json:"kubeletClientInfo"` + + // servicesSubnet is the subnet to use for assigning service IPs + ServicesSubnet string `json:"servicesSubnet"` + // servicesNodePortRange is the range to use for assigning service public ports on a host. + ServicesNodePortRange string `json:"servicesNodePortRange"` + + // consolePublicURL is an optional URL to provide a redirect from the kube-apiserver to the webconsole + ConsolePublicURL string `json:"consolePublicURL"` + + // UserAgentMatchingConfig controls how API calls from *voluntarily* identifying clients will be handled. THIS DOES NOT DEFEND AGAINST MALICIOUS CLIENTS! + // TODO I think we should just drop this feature. 
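+	// A hand-written sketch of a possible value (the regex and message are
+	// hypothetical; field names follow UserAgentMatchingConfig below):
+	//
+	//	userAgentMatchingConfig:
+	//	  deniedClients:
+	//	  - regex: oc/v1\.1\..*
+	//	    rejectionMessage: "please update your client"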
+	UserAgentMatchingConfig UserAgentMatchingConfig `json:"userAgentMatchingConfig"`
+
+	// imagePolicyConfig feeds the image policy admission plugin
+	// TODO make it an admission plugin config
+	ImagePolicyConfig KubeAPIServerImagePolicyConfig `json:"imagePolicyConfig"`
+
+	// projectConfig feeds an admission plugin
+	// TODO make it an admission plugin config
+	ProjectConfig KubeAPIServerProjectConfig `json:"projectConfig"`
+
+	// serviceAccountPublicKeyFiles is a list of files, each containing a PEM-encoded public RSA key.
+	// (If any file contains a private key, the public portion of the key is used)
+	// The list of public keys is used to verify presented service account tokens.
+	// Each key is tried in order until the list is exhausted or verification succeeds.
+	// If no keys are specified, no service account authentication will be available.
+	ServiceAccountPublicKeyFiles []string `json:"serviceAccountPublicKeyFiles"`
+
+	// oauthConfig, if present, starts the /oauth endpoint in this process
+	OAuthConfig *osinv1.OAuthConfig `json:"oauthConfig"`
+
+	// TODO this needs to be removed.
+	APIServerArguments map[string]Arguments `json:"apiServerArguments"`
+}
+
+// Arguments masks the value so protobuf can generate
+// +protobuf.nullable=true
+// +protobuf.options.(gogoproto.goproto_stringer)=false
+type Arguments []string
+
+func (t Arguments) String() string {
+	return fmt.Sprintf("%v", []string(t))
+}
+
+type KubeAPIServerImagePolicyConfig struct {
+	// internalRegistryHostname sets the hostname for the default internal image
+	// registry. The value must be in "hostname[:port]" format.
+	// For backward compatibility, users can still use OPENSHIFT_DEFAULT_REGISTRY
+	// environment variable but this setting overrides the environment variable.
+	InternalRegistryHostname string `json:"internalRegistryHostname"`
+	// externalRegistryHostnames provides the hostnames for the default external image
+	// registry. The external hostname should be set only when the image registry
+	// is exposed externally. The first value is used in 'publicDockerImageRepository'
+	// field in ImageStreams. The value must be in "hostname[:port]" format.
+	ExternalRegistryHostnames []string `json:"externalRegistryHostnames"`
+}
+
+type KubeAPIServerProjectConfig struct {
+	// defaultNodeSelector holds default project node label selector
+	DefaultNodeSelector string `json:"defaultNodeSelector"`
+}
+
+// KubeletConnectionInfo holds information necessary for connecting to a kubelet
+type KubeletConnectionInfo struct {
+	// port is the port to connect to kubelets on
+	Port uint32 `json:"port"`
+	// ca is the CA for verifying TLS connections to kubelets
+	CA string `json:"ca"`
+	// CertInfo is the TLS client cert information for securing communication to kubelets
+	// this is anonymous so that we can inline it for serialization
+	configv1.CertInfo `json:",inline"`
+}
+
+// UserAgentMatchingConfig controls how API calls from *voluntarily* identifying clients will be handled. THIS DOES NOT DEFEND AGAINST MALICIOUS CLIENTS!
+type UserAgentMatchingConfig struct {
+	// requiredClients: if this list is non-empty, then a User-Agent must match one of the UserAgentRegexes to be allowed
+	RequiredClients []UserAgentMatchRule `json:"requiredClients"`
+
+	// deniedClients: if this list is non-empty, then a User-Agent must not match any of the UserAgentRegexes
+	DeniedClients []UserAgentDenyRule `json:"deniedClients"`
+
+	// defaultRejectionMessage is the message shown when rejecting a client.
+	// If it is not set, a generic message is given.
+	DefaultRejectionMessage string `json:"defaultRejectionMessage"`
+}
+
+// UserAgentMatchRule describes how to match a given request based on User-Agent and HTTPVerb
+type UserAgentMatchRule struct {
+	// regex is a regex that is checked against the User-Agent.
+	// Known variants of oc clients
+	// 1. oc accessing kube resources: oc/v1.2.0 (linux/amd64) kubernetes/bc4550d
+	// 2. oc accessing openshift resources: oc/v1.1.3 (linux/amd64) openshift/b348c2f
+	// 3. openshift kubectl accessing kube resources: openshift/v1.2.0 (linux/amd64) kubernetes/bc4550d
+	// 4. openshift kubectl accessing openshift resources: openshift/v1.1.3 (linux/amd64) openshift/b348c2f
+	// 5. oadm accessing kube resources: oadm/v1.2.0 (linux/amd64) kubernetes/bc4550d
+	// 6. oadm accessing openshift resources: oadm/v1.1.3 (linux/amd64) openshift/b348c2f
+	// 7. openshift cli accessing kube resources: openshift/v1.2.0 (linux/amd64) kubernetes/bc4550d
+	// 8. openshift cli accessing openshift resources: openshift/v1.1.3 (linux/amd64) openshift/b348c2f
+	Regex string `json:"regex"`
+
+	// httpVerbs specifies which HTTP verbs should be matched. An empty list means "match all verbs".
+	HTTPVerbs []string `json:"httpVerbs"`
+}
+
+// UserAgentDenyRule adds a rejection message that can be used to help a user figure out how to get an approved client
+type UserAgentDenyRule struct {
+	UserAgentMatchRule `json:",inline"`
+
+	// RejectionMessage is the message shown when rejecting a client. If it is not set, the default message is used.
+	RejectionMessage string `json:"rejectionMessage"`
+}
+
+// MasterAuthConfig configures authentication options in addition to the standard
+// oauth token and client certificate authenticators
+type MasterAuthConfig struct {
+	// requestHeader holds options for setting up a front proxy against the API. It is optional.
+	RequestHeader *RequestHeaderAuthenticationOptions `json:"requestHeader"`
+	// webhookTokenAuthenticators, if present, configures remote token reviewers
+	WebhookTokenAuthenticators []WebhookTokenAuthenticator `json:"webhookTokenAuthenticators"`
+	// oauthMetadataFile is a path to a file containing the discovery endpoint for OAuth 2.0 Authorization
+	// Server Metadata for an external OAuth server.
+	// See IETF Draft: https://tools.ietf.org/html/draft-ietf-oauth-discovery-04#section-2
+	// This option is mutually exclusive with OAuthConfig
+	OAuthMetadataFile string `json:"oauthMetadataFile"`
+}
+
+// WebhookTokenAuthenticator holds the necessary configuration options for
+// external token authenticators
+type WebhookTokenAuthenticator struct {
+	// configFile is a path to a Kubeconfig file with the webhook configuration
+	ConfigFile string `json:"configFile"`
+	// cacheTTL indicates how long an authentication result should be cached.
+	// It takes a valid time duration string (e.g. "5m").
+	// If empty, you get a default timeout of 2 minutes.
+	// If zero (e.g. "0m"), caching is disabled
+	CacheTTL string `json:"cacheTTL"`
+}
+
+// RequestHeaderAuthenticationOptions provides options for setting up a front proxy against the entire
+// API instead of against the /oauth endpoint.
+type RequestHeaderAuthenticationOptions struct {
+	// clientCA is a file with the trusted signer certs. It is required.
+	ClientCA string `json:"clientCA"`
+	// clientCommonNames is a required list of common names to require a match from.
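+	// For example (hypothetical): ["front-proxy-client"].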
+ ClientCommonNames []string `json:"clientCommonNames"` + + // usernameHeaders is the list of headers to check for user information. First hit wins. + UsernameHeaders []string `json:"usernameHeaders"` + // groupHeaders is the set of headers to check for group information. All are unioned. + GroupHeaders []string `json:"groupHeaders"` + // extraHeaderPrefixes is the set of request header prefixes to inspect for user extra. X-Remote-Extra- is suggested. + ExtraHeaderPrefixes []string `json:"extraHeaderPrefixes"` +} + +// AggregatorConfig holds information required to make the aggregator function. +type AggregatorConfig struct { + // proxyClientInfo specifies the client cert/key to use when proxying to aggregated API servers + ProxyClientInfo configv1.CertInfo `json:"proxyClientInfo"` +} + +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +type KubeControllerManagerConfig struct { + metav1.TypeMeta `json:",inline"` + + // serviceServingCert provides support for the old alpha service serving cert signer CA bundle + ServiceServingCert ServiceServingCert `json:"serviceServingCert"` + + // projectConfig is an optimization for the daemonset controller + ProjectConfig KubeControllerManagerProjectConfig `json:"projectConfig"` + + // extendedArguments is used to configure the kube-controller-manager + ExtendedArguments map[string]Arguments `json:"extendedArguments"` +} + +type KubeControllerManagerProjectConfig struct { + // defaultNodeSelector holds default project node label selector + DefaultNodeSelector string `json:"defaultNodeSelector"` +} + +// ServiceServingCert holds configuration for service serving cert signer which creates cert/key pairs for +// pods fulfilling a service to serve with. +type ServiceServingCert struct { + // CertFile is a file containing a PEM-encoded certificate + CertFile string `json:"certFile"` +} diff --git a/vendor/github.com/openshift/api/kubecontrolplane/v1/zz_generated.deepcopy.go b/vendor/github.com/openshift/api/kubecontrolplane/v1/zz_generated.deepcopy.go new file mode 100644 index 0000000000000000000000000000000000000000..3e4b5730f25f1b2fd68df241a982a0c5b5a08f4c --- /dev/null +++ b/vendor/github.com/openshift/api/kubecontrolplane/v1/zz_generated.deepcopy.go @@ -0,0 +1,378 @@ +// +build !ignore_autogenerated + +// Code generated by deepcopy-gen. DO NOT EDIT. + +package v1 + +import ( + osinv1 "github.com/openshift/api/osin/v1" + runtime "k8s.io/apimachinery/pkg/runtime" +) + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *AggregatorConfig) DeepCopyInto(out *AggregatorConfig) { + *out = *in + out.ProxyClientInfo = in.ProxyClientInfo + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AggregatorConfig. +func (in *AggregatorConfig) DeepCopy() *AggregatorConfig { + if in == nil { + return nil + } + out := new(AggregatorConfig) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in Arguments) DeepCopyInto(out *Arguments) { + { + in := &in + *out = make(Arguments, len(*in)) + copy(*out, *in) + return + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Arguments. 
+func (in Arguments) DeepCopy() Arguments { + if in == nil { + return nil + } + out := new(Arguments) + in.DeepCopyInto(out) + return *out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *KubeAPIServerConfig) DeepCopyInto(out *KubeAPIServerConfig) { + *out = *in + out.TypeMeta = in.TypeMeta + in.GenericAPIServerConfig.DeepCopyInto(&out.GenericAPIServerConfig) + in.AuthConfig.DeepCopyInto(&out.AuthConfig) + out.AggregatorConfig = in.AggregatorConfig + out.KubeletClientInfo = in.KubeletClientInfo + in.UserAgentMatchingConfig.DeepCopyInto(&out.UserAgentMatchingConfig) + in.ImagePolicyConfig.DeepCopyInto(&out.ImagePolicyConfig) + out.ProjectConfig = in.ProjectConfig + if in.ServiceAccountPublicKeyFiles != nil { + in, out := &in.ServiceAccountPublicKeyFiles, &out.ServiceAccountPublicKeyFiles + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.OAuthConfig != nil { + in, out := &in.OAuthConfig, &out.OAuthConfig + *out = new(osinv1.OAuthConfig) + (*in).DeepCopyInto(*out) + } + if in.APIServerArguments != nil { + in, out := &in.APIServerArguments, &out.APIServerArguments + *out = make(map[string]Arguments, len(*in)) + for key, val := range *in { + var outVal []string + if val == nil { + (*out)[key] = nil + } else { + in, out := &val, &outVal + *out = make(Arguments, len(*in)) + copy(*out, *in) + } + (*out)[key] = outVal + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new KubeAPIServerConfig. +func (in *KubeAPIServerConfig) DeepCopy() *KubeAPIServerConfig { + if in == nil { + return nil + } + out := new(KubeAPIServerConfig) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *KubeAPIServerConfig) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *KubeAPIServerImagePolicyConfig) DeepCopyInto(out *KubeAPIServerImagePolicyConfig) { + *out = *in + if in.ExternalRegistryHostnames != nil { + in, out := &in.ExternalRegistryHostnames, &out.ExternalRegistryHostnames + *out = make([]string, len(*in)) + copy(*out, *in) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new KubeAPIServerImagePolicyConfig. +func (in *KubeAPIServerImagePolicyConfig) DeepCopy() *KubeAPIServerImagePolicyConfig { + if in == nil { + return nil + } + out := new(KubeAPIServerImagePolicyConfig) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *KubeAPIServerProjectConfig) DeepCopyInto(out *KubeAPIServerProjectConfig) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new KubeAPIServerProjectConfig. +func (in *KubeAPIServerProjectConfig) DeepCopy() *KubeAPIServerProjectConfig { + if in == nil { + return nil + } + out := new(KubeAPIServerProjectConfig) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
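The generated DeepCopyInto above copies each Arguments value into a freshly allocated slice instead of assigning the map wholesale. A small self-contained sketch (using a plain map[string][]string) of the aliasing bug that the per-key copy avoids:

package main

import "fmt"

func main() {
	in := map[string][]string{"feature-gates": {"A=true"}}

	// Shallow copy: the map is new, but the slices are still shared.
	shallow := make(map[string][]string, len(in))
	for k, v := range in {
		shallow[k] = v
	}

	// Deep copy: each slice gets its own backing array, as deepcopy-gen does.
	deep := make(map[string][]string, len(in))
	for k, v := range in {
		cp := make([]string, len(v))
		copy(cp, v)
		deep[k] = cp
	}

	in["feature-gates"][0] = "A=false"
	fmt.Println(shallow["feature-gates"][0]) // "A=false" — mutation leaked through
	fmt.Println(deep["feature-gates"][0])    // "A=true" — isolated
}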
+func (in *KubeControllerManagerConfig) DeepCopyInto(out *KubeControllerManagerConfig) { + *out = *in + out.TypeMeta = in.TypeMeta + out.ServiceServingCert = in.ServiceServingCert + out.ProjectConfig = in.ProjectConfig + if in.ExtendedArguments != nil { + in, out := &in.ExtendedArguments, &out.ExtendedArguments + *out = make(map[string]Arguments, len(*in)) + for key, val := range *in { + var outVal []string + if val == nil { + (*out)[key] = nil + } else { + in, out := &val, &outVal + *out = make(Arguments, len(*in)) + copy(*out, *in) + } + (*out)[key] = outVal + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new KubeControllerManagerConfig. +func (in *KubeControllerManagerConfig) DeepCopy() *KubeControllerManagerConfig { + if in == nil { + return nil + } + out := new(KubeControllerManagerConfig) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *KubeControllerManagerConfig) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *KubeControllerManagerProjectConfig) DeepCopyInto(out *KubeControllerManagerProjectConfig) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new KubeControllerManagerProjectConfig. +func (in *KubeControllerManagerProjectConfig) DeepCopy() *KubeControllerManagerProjectConfig { + if in == nil { + return nil + } + out := new(KubeControllerManagerProjectConfig) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *KubeletConnectionInfo) DeepCopyInto(out *KubeletConnectionInfo) { + *out = *in + out.CertInfo = in.CertInfo + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new KubeletConnectionInfo. +func (in *KubeletConnectionInfo) DeepCopy() *KubeletConnectionInfo { + if in == nil { + return nil + } + out := new(KubeletConnectionInfo) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *MasterAuthConfig) DeepCopyInto(out *MasterAuthConfig) { + *out = *in + if in.RequestHeader != nil { + in, out := &in.RequestHeader, &out.RequestHeader + *out = new(RequestHeaderAuthenticationOptions) + (*in).DeepCopyInto(*out) + } + if in.WebhookTokenAuthenticators != nil { + in, out := &in.WebhookTokenAuthenticators, &out.WebhookTokenAuthenticators + *out = make([]WebhookTokenAuthenticator, len(*in)) + copy(*out, *in) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MasterAuthConfig. +func (in *MasterAuthConfig) DeepCopy() *MasterAuthConfig { + if in == nil { + return nil + } + out := new(MasterAuthConfig) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *RequestHeaderAuthenticationOptions) DeepCopyInto(out *RequestHeaderAuthenticationOptions) { + *out = *in + if in.ClientCommonNames != nil { + in, out := &in.ClientCommonNames, &out.ClientCommonNames + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.UsernameHeaders != nil { + in, out := &in.UsernameHeaders, &out.UsernameHeaders + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.GroupHeaders != nil { + in, out := &in.GroupHeaders, &out.GroupHeaders + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.ExtraHeaderPrefixes != nil { + in, out := &in.ExtraHeaderPrefixes, &out.ExtraHeaderPrefixes + *out = make([]string, len(*in)) + copy(*out, *in) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RequestHeaderAuthenticationOptions. +func (in *RequestHeaderAuthenticationOptions) DeepCopy() *RequestHeaderAuthenticationOptions { + if in == nil { + return nil + } + out := new(RequestHeaderAuthenticationOptions) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ServiceServingCert) DeepCopyInto(out *ServiceServingCert) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ServiceServingCert. +func (in *ServiceServingCert) DeepCopy() *ServiceServingCert { + if in == nil { + return nil + } + out := new(ServiceServingCert) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *UserAgentDenyRule) DeepCopyInto(out *UserAgentDenyRule) { + *out = *in + in.UserAgentMatchRule.DeepCopyInto(&out.UserAgentMatchRule) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new UserAgentDenyRule. +func (in *UserAgentDenyRule) DeepCopy() *UserAgentDenyRule { + if in == nil { + return nil + } + out := new(UserAgentDenyRule) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *UserAgentMatchRule) DeepCopyInto(out *UserAgentMatchRule) { + *out = *in + if in.HTTPVerbs != nil { + in, out := &in.HTTPVerbs, &out.HTTPVerbs + *out = make([]string, len(*in)) + copy(*out, *in) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new UserAgentMatchRule. +func (in *UserAgentMatchRule) DeepCopy() *UserAgentMatchRule { + if in == nil { + return nil + } + out := new(UserAgentMatchRule) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *UserAgentMatchingConfig) DeepCopyInto(out *UserAgentMatchingConfig) { + *out = *in + if in.RequiredClients != nil { + in, out := &in.RequiredClients, &out.RequiredClients + *out = make([]UserAgentMatchRule, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.DeniedClients != nil { + in, out := &in.DeniedClients, &out.DeniedClients + *out = make([]UserAgentDenyRule, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new UserAgentMatchingConfig. 
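The request-header options copied above carry "first hit wins" semantics for username headers, while group headers are unioned. A hedged sketch of that lookup over net/http headers; userFrom is an illustrative helper, not the authenticator the API server actually uses:

package main

import (
	"fmt"
	"net/http"
)

// userFrom applies the documented header semantics: the first username
// header that is set wins; values from all group headers are unioned.
func userFrom(h http.Header, usernameHeaders, groupHeaders []string) (string, []string) {
	var name string
	for _, k := range usernameHeaders {
		if v := h.Get(k); v != "" {
			name = v
			break
		}
	}
	var groups []string
	for _, k := range groupHeaders {
		groups = append(groups, h.Values(k)...)
	}
	return name, groups
}

func main() {
	h := http.Header{}
	h.Set("X-Remote-User", "jane")
	h.Add("X-Remote-Group", "dev")
	h.Add("X-Remote-Group", "ops")
	name, groups := userFrom(h, []string{"X-Remote-User"}, []string{"X-Remote-Group"})
	fmt.Println(name, groups) // jane [dev ops]
}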
+func (in *UserAgentMatchingConfig) DeepCopy() *UserAgentMatchingConfig { + if in == nil { + return nil + } + out := new(UserAgentMatchingConfig) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *WebhookTokenAuthenticator) DeepCopyInto(out *WebhookTokenAuthenticator) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WebhookTokenAuthenticator. +func (in *WebhookTokenAuthenticator) DeepCopy() *WebhookTokenAuthenticator { + if in == nil { + return nil + } + out := new(WebhookTokenAuthenticator) + in.DeepCopyInto(out) + return out +} diff --git a/vendor/github.com/openshift/api/kubecontrolplane/v1/zz_generated.swagger_doc_generated.go b/vendor/github.com/openshift/api/kubecontrolplane/v1/zz_generated.swagger_doc_generated.go new file mode 100644 index 0000000000000000000000000000000000000000..7a0cbada2ea7723619538947c831c7fe711454b4 --- /dev/null +++ b/vendor/github.com/openshift/api/kubecontrolplane/v1/zz_generated.swagger_doc_generated.go @@ -0,0 +1,159 @@ +package v1 + +// This file contains a collection of methods that can be used from go-restful to +// generate Swagger API documentation for its models. Please read this PR for more +// information on the implementation: https://github.com/emicklei/go-restful/pull/215 +// +// TODOs are ignored from the parser (e.g. TODO(andronat):... || TODO:...) if and only if +// they are on one line! For multiple line or blocks that you want to ignore use ---. +// Any context after a --- is ignored. +// +// Those methods can be generated by using hack/update-swagger-docs.sh + +// AUTO-GENERATED FUNCTIONS START HERE +var map_AggregatorConfig = map[string]string{ + "": "AggregatorConfig holds information required to make the aggregator function.", + "proxyClientInfo": "proxyClientInfo specifies the client cert/key to use when proxying to aggregated API servers", +} + +func (AggregatorConfig) SwaggerDoc() map[string]string { + return map_AggregatorConfig +} + +var map_KubeAPIServerConfig = map[string]string{ + "authConfig": "authConfig configures authentication options in addition to the standard oauth token and client certificate authenticators", + "aggregatorConfig": "aggregatorConfig has options for configuring the aggregator component of the API server.", + "kubeletClientInfo": "kubeletClientInfo contains information about how to connect to kubelets", + "servicesSubnet": "servicesSubnet is the subnet to use for assigning service IPs", + "servicesNodePortRange": "servicesNodePortRange is the range to use for assigning service public ports on a host.", + "consolePublicURL": "consolePublicURL is an optional URL to provide a redirect from the kube-apiserver to the webconsole", + "userAgentMatchingConfig": "UserAgentMatchingConfig controls how API calls from *voluntarily* identifying clients will be handled. THIS DOES NOT DEFEND AGAINST MALICIOUS CLIENTS!", + "imagePolicyConfig": "imagePolicyConfig feeds the image policy admission plugin", + "projectConfig": "projectConfig feeds an admission plugin", + "serviceAccountPublicKeyFiles": "serviceAccountPublicKeyFiles is a list of files, each containing a PEM-encoded public RSA key. (If any file contains a private key, the public portion of the key is used) The list of public keys is used to verify presented service account tokens. Each key is tried in order until the list is exhausted or verification succeeds. 
If no keys are specified, no service account authentication will be available.", + "oauthConfig": "oauthConfig, if present, starts the /oauth endpoint in this process", +} + +func (KubeAPIServerConfig) SwaggerDoc() map[string]string { + return map_KubeAPIServerConfig +} + +var map_KubeAPIServerImagePolicyConfig = map[string]string{ + "internalRegistryHostname": "internalRegistryHostname sets the hostname for the default internal image registry. The value must be in \"hostname[:port]\" format. For backward compatibility, users can still use OPENSHIFT_DEFAULT_REGISTRY environment variable but this setting overrides the environment variable.", + "externalRegistryHostnames": "externalRegistryHostnames provides the hostnames for the default external image registry. The external hostname should be set only when the image registry is exposed externally. The first value is used in 'publicDockerImageRepository' field in ImageStreams. The value must be in \"hostname[:port]\" format.", +} + +func (KubeAPIServerImagePolicyConfig) SwaggerDoc() map[string]string { + return map_KubeAPIServerImagePolicyConfig +} + +var map_KubeAPIServerProjectConfig = map[string]string{ + "defaultNodeSelector": "defaultNodeSelector holds default project node label selector", +} + +func (KubeAPIServerProjectConfig) SwaggerDoc() map[string]string { + return map_KubeAPIServerProjectConfig +} + +var map_KubeControllerManagerConfig = map[string]string{ + "serviceServingCert": "serviceServingCert provides support for the old alpha service serving cert signer CA bundle", + "projectConfig": "projectConfig is an optimization for the daemonset controller", + "extendedArguments": "extendedArguments is used to configure the kube-controller-manager", +} + +func (KubeControllerManagerConfig) SwaggerDoc() map[string]string { + return map_KubeControllerManagerConfig +} + +var map_KubeControllerManagerProjectConfig = map[string]string{ + "defaultNodeSelector": "defaultNodeSelector holds default project node label selector", +} + +func (KubeControllerManagerProjectConfig) SwaggerDoc() map[string]string { + return map_KubeControllerManagerProjectConfig +} + +var map_KubeletConnectionInfo = map[string]string{ + "": "KubeletConnectionInfo holds information necessary for connecting to a kubelet", + "port": "port is the port to connect to kubelets on", + "ca": "ca is the CA for verifying TLS connections to kubelets", +} + +func (KubeletConnectionInfo) SwaggerDoc() map[string]string { + return map_KubeletConnectionInfo +} + +var map_MasterAuthConfig = map[string]string{ + "": "MasterAuthConfig configures authentication options in addition to the standard oauth token and client certificate authenticators", + "requestHeader": "requestHeader holds options for setting up a front proxy against the API. It is optional.", + "webhookTokenAuthenticators": "webhookTokenAuthenticators, if present, configures remote token reviewers", + "oauthMetadataFile": "oauthMetadataFile is a path to a file containing the discovery endpoint for OAuth 2.0 Authorization Server Metadata for an external OAuth server.
See IETF Draft: // https://tools.ietf.org/html/draft-ietf-oauth-discovery-04#section-2 This option is mutually exclusive with OAuthConfig", +} + +func (MasterAuthConfig) SwaggerDoc() map[string]string { + return map_MasterAuthConfig +} + +var map_RequestHeaderAuthenticationOptions = map[string]string{ + "": "RequestHeaderAuthenticationOptions provides options for setting up a front proxy against the entire API instead of against the /oauth endpoint.", + "clientCA": "clientCA is a file with the trusted signer certs. It is required.", + "clientCommonNames": "clientCommonNames is a required list of common names to require a match from.", + "usernameHeaders": "usernameHeaders is the list of headers to check for user information. First hit wins.", + "groupHeaders": "groupHeaders is the set of headers to check for group information. All are unioned.", + "extraHeaderPrefixes": "extraHeaderPrefixes is the set of request header prefixes to inspect for user extra. X-Remote-Extra- is suggested.", +} + +func (RequestHeaderAuthenticationOptions) SwaggerDoc() map[string]string { + return map_RequestHeaderAuthenticationOptions +} + +var map_ServiceServingCert = map[string]string{ + "": "ServiceServingCert holds configuration for service serving cert signer which creates cert/key pairs for pods fulfilling a service to serve with.", + "certFile": "CertFile is a file containing a PEM-encoded certificate", +} + +func (ServiceServingCert) SwaggerDoc() map[string]string { + return map_ServiceServingCert +} + +var map_UserAgentDenyRule = map[string]string{ + "": "UserAgentDenyRule adds a rejection message that can be used to help a user figure out how to get an approved client", + "rejectionMessage": "RejectionMessage is the message shown when rejecting a client. If it is not set, the default message is used.", +} + +func (UserAgentDenyRule) SwaggerDoc() map[string]string { + return map_UserAgentDenyRule +} + +var map_UserAgentMatchRule = map[string]string{ + "": "UserAgentMatchRule describes how to match a given request based on User-Agent and HTTPVerb", + "regex": "regex is a regex that is checked against the User-Agent. Known variants of oc clients 1. oc accessing kube resources: oc/v1.2.0 (linux/amd64) kubernetes/bc4550d 2. oc accessing openshift resources: oc/v1.1.3 (linux/amd64) openshift/b348c2f 3. openshift kubectl accessing kube resources: openshift/v1.2.0 (linux/amd64) kubernetes/bc4550d 4. openshift kubectl accessing openshift resources: openshift/v1.1.3 (linux/amd64) openshift/b348c2f 5. oadm accessing kube resources: oadm/v1.2.0 (linux/amd64) kubernetes/bc4550d 6. oadm accessing openshift resources: oadm/v1.1.3 (linux/amd64) openshift/b348c2f 7. openshift cli accessing kube resources: openshift/v1.2.0 (linux/amd64) kubernetes/bc4550d 8. openshift cli accessing openshift resources: openshift/v1.1.3 (linux/amd64) openshift/b348c2f", + "httpVerbs": "httpVerbs specifies which HTTP verbs should be matched. An empty list means \"match all verbs\".", +} + +func (UserAgentMatchRule) SwaggerDoc() map[string]string { + return map_UserAgentMatchRule +} + +var map_UserAgentMatchingConfig = map[string]string{ + "": "UserAgentMatchingConfig controls how API calls from *voluntarily* identifying clients will be handled.
THIS DOES NOT DEFEND AGAINST MALICIOUS CLIENTS!", + "requiredClients": "requiredClients if this list is non-empty, then a User-Agent must match one of the UserAgentRegexes to be allowed", + "deniedClients": "deniedClients if this list is non-empty, then a User-Agent must not match any of the UserAgentRegexes", + "defaultRejectionMessage": "defaultRejectionMessage is the message shown when rejecting a client. If it is not set, a generic message is given.", +} + +func (UserAgentMatchingConfig) SwaggerDoc() map[string]string { + return map_UserAgentMatchingConfig +} + +var map_WebhookTokenAuthenticator = map[string]string{ + "": "WebhookTokenAuthenticator holds the necessary configuration options for an external token authenticator", + "configFile": "configFile is a path to a Kubeconfig file with the webhook configuration", + "cacheTTL": "cacheTTL indicates how long an authentication result should be cached. It takes a valid time duration string (e.g. \"5m\"). If empty, you get a default timeout of 2 minutes. If zero (e.g. \"0m\"), caching is disabled", +} + +func (WebhookTokenAuthenticator) SwaggerDoc() map[string]string { + return map_WebhookTokenAuthenticator +} + +// AUTO-GENERATED FUNCTIONS END HERE diff --git a/vendor/github.com/openshift/api/legacyconfig/v1/doc.go b/vendor/github.com/openshift/api/legacyconfig/v1/doc.go new file mode 100644 index 0000000000000000000000000000000000000000..93fc6dc50d244d4d2e78a620b109adad110ca66f --- /dev/null +++ b/vendor/github.com/openshift/api/legacyconfig/v1/doc.go @@ -0,0 +1,7 @@ +// +k8s:deepcopy-gen=package,register +// +k8s:defaulter-gen=TypeMeta +// +k8s:openapi-gen=true + +// +groupName=legacy.config.openshift.io +// Package v1 is deprecated and exists to ease a transition to current APIs +package v1 diff --git a/vendor/github.com/openshift/api/legacyconfig/v1/register.go b/vendor/github.com/openshift/api/legacyconfig/v1/register.go new file mode 100644 index 0000000000000000000000000000000000000000..fa63cbd437542088a5aa110210ec388f2c613d77 --- /dev/null +++ b/vendor/github.com/openshift/api/legacyconfig/v1/register.go @@ -0,0 +1,45 @@ +package v1 + +import ( + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" +) + +var ( + // Legacy is the 'v1' apiVersion of config + LegacyGroupName = "" + LegacySchemeGroupVersion = schema.GroupVersion{Group: LegacyGroupName, Version: "v1"} + legacySchemeBuilder = runtime.NewSchemeBuilder( + addKnownTypesToLegacy, + ) + InstallLegacy = legacySchemeBuilder.AddToScheme +) + +// Adds the list of known types to api.Scheme.
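InstallLegacy, assembled above via the scheme builder, is applied to a runtime.Scheme in the usual way. A minimal sketch of a consumer wiring it up, assuming the vendored package is imported as legacyconfigv1:

package main

import (
	legacyconfigv1 "github.com/openshift/api/legacyconfig/v1"
	"k8s.io/apimachinery/pkg/runtime"
)

func main() {
	scheme := runtime.NewScheme()
	// Registers MasterConfig, NodeConfig, the identity providers, etc.
	if err := legacyconfigv1.InstallLegacy(scheme); err != nil {
		panic(err)
	}
	// The scheme now recognizes the legacy "v1" kinds, e.g. for decoding an
	// on-disk master-config.yaml with a codec built on this scheme.
	_ = scheme.Recognizes(legacyconfigv1.LegacySchemeGroupVersion.WithKind("MasterConfig"))
}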
+func addKnownTypesToLegacy(scheme *runtime.Scheme) error { + scheme.AddKnownTypes(LegacySchemeGroupVersion, + &MasterConfig{}, + &NodeConfig{}, + &SessionSecrets{}, + + &BasicAuthPasswordIdentityProvider{}, + &AllowAllPasswordIdentityProvider{}, + &DenyAllPasswordIdentityProvider{}, + &HTPasswdPasswordIdentityProvider{}, + &LDAPPasswordIdentityProvider{}, + &KeystonePasswordIdentityProvider{}, + &RequestHeaderIdentityProvider{}, + &GitHubIdentityProvider{}, + &GitLabIdentityProvider{}, + &GoogleIdentityProvider{}, + &OpenIDIdentityProvider{}, + + &LDAPSyncConfig{}, + + &DefaultAdmissionConfig{}, + + &BuildDefaultsConfig{}, + &BuildOverridesConfig{}, + ) + return nil +} diff --git a/vendor/github.com/openshift/api/legacyconfig/v1/serialization.go b/vendor/github.com/openshift/api/legacyconfig/v1/serialization.go new file mode 100644 index 0000000000000000000000000000000000000000..1450742739cc7bc9dbc388c443155d371e0db252 --- /dev/null +++ b/vendor/github.com/openshift/api/legacyconfig/v1/serialization.go @@ -0,0 +1,87 @@ +package v1 + +import "k8s.io/apimachinery/pkg/runtime" + +var _ runtime.NestedObjectDecoder = &MasterConfig{} + +// DecodeNestedObjects handles encoding RawExtensions on the MasterConfig, ensuring the +// objects are decoded with the provided decoder. +func (c *MasterConfig) DecodeNestedObjects(d runtime.Decoder) error { + // decoding failures result in a runtime.Unknown object being created in Object and passed + // to conversion + for k, v := range c.AdmissionConfig.PluginConfig { + DecodeNestedRawExtensionOrUnknown(d, &v.Configuration) + c.AdmissionConfig.PluginConfig[k] = v + } + if c.OAuthConfig != nil { + for i := range c.OAuthConfig.IdentityProviders { + DecodeNestedRawExtensionOrUnknown(d, &c.OAuthConfig.IdentityProviders[i].Provider) + } + } + DecodeNestedRawExtensionOrUnknown(d, &c.AuditConfig.PolicyConfiguration) + return nil +} + +var _ runtime.NestedObjectEncoder = &MasterConfig{} + +// EncodeNestedObjects handles encoding RawExtensions on the MasterConfig, ensuring the +// objects are encoded with the provided encoder. +func (c *MasterConfig) EncodeNestedObjects(e runtime.Encoder) error { + for k, v := range c.AdmissionConfig.PluginConfig { + if err := EncodeNestedRawExtension(e, &v.Configuration); err != nil { + return err + } + c.AdmissionConfig.PluginConfig[k] = v + } + if c.OAuthConfig != nil { + for i := range c.OAuthConfig.IdentityProviders { + if err := EncodeNestedRawExtension(e, &c.OAuthConfig.IdentityProviders[i].Provider); err != nil { + return err + } + } + } + if err := EncodeNestedRawExtension(e, &c.AuditConfig.PolicyConfiguration); err != nil { + return err + } + return nil +} + +// DecodeNestedRawExtensionOrUnknown +func DecodeNestedRawExtensionOrUnknown(d runtime.Decoder, ext *runtime.RawExtension) { + if ext.Raw == nil || ext.Object != nil { + return + } + obj, gvk, err := d.Decode(ext.Raw, nil, nil) + if err != nil { + unk := &runtime.Unknown{Raw: ext.Raw} + if runtime.IsNotRegisteredError(err) { + if _, gvk, err := d.Decode(ext.Raw, nil, unk); err == nil { + unk.APIVersion = gvk.GroupVersion().String() + unk.Kind = gvk.Kind + ext.Object = unk + return + } + } + // TODO: record mime-type with the object + if gvk != nil { + unk.APIVersion = gvk.GroupVersion().String() + unk.Kind = gvk.Kind + } + obj = unk + } + ext.Object = obj +} + +// EncodeNestedRawExtension will encode the object in the RawExtension (if not nil) or +// return an error. 
+func EncodeNestedRawExtension(e runtime.Encoder, ext *runtime.RawExtension) error { + if ext.Raw != nil || ext.Object == nil { + return nil + } + data, err := runtime.Encode(e, ext.Object) + if err != nil { + return err + } + ext.Raw = data + return nil +} diff --git a/vendor/github.com/openshift/api/legacyconfig/v1/stringsource.go b/vendor/github.com/openshift/api/legacyconfig/v1/stringsource.go new file mode 100644 index 0000000000000000000000000000000000000000..6a5718c1db27186e2a5eb851ead6bf5dc57dab35 --- /dev/null +++ b/vendor/github.com/openshift/api/legacyconfig/v1/stringsource.go @@ -0,0 +1,31 @@ +package v1 + +import "encoding/json" + +// UnmarshalJSON implements the json.Unmarshaller interface. +// If the value is a string, it sets the Value field of the StringSource. +// Otherwise, it is unmarshaled into the StringSourceSpec struct +func (s *StringSource) UnmarshalJSON(value []byte) error { + // If we can unmarshal to a simple string, just set the value + var simpleValue string + if err := json.Unmarshal(value, &simpleValue); err == nil { + s.Value = simpleValue + return nil + } + + // Otherwise do the full struct unmarshal + return json.Unmarshal(value, &s.StringSourceSpec) +} + +// MarshalJSON implements the json.Marshaller interface. +// If the StringSource contains only a string Value (or is empty), it is marshaled as a JSON string. +// Otherwise, the StringSourceSpec struct is marshaled as a JSON object. +func (s *StringSource) MarshalJSON() ([]byte, error) { + // If we have only a cleartext value set, do a simple string marshal + if s.StringSourceSpec == (StringSourceSpec{Value: s.Value}) { + return json.Marshal(s.Value) + } + + // Otherwise do the full struct marshal of the externalized bits + return json.Marshal(s.StringSourceSpec) +} diff --git a/vendor/github.com/openshift/api/legacyconfig/v1/types.go b/vendor/github.com/openshift/api/legacyconfig/v1/types.go new file mode 100644 index 0000000000000000000000000000000000000000..d9276cb1608997ab21adbaa644677e592bc38e32 --- /dev/null +++ b/vendor/github.com/openshift/api/legacyconfig/v1/types.go @@ -0,0 +1,1527 @@ +package v1 + +import ( + corev1 "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/api/resource" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + + buildv1 "github.com/openshift/api/build/v1" +) + +type ExtendedArguments map[string][]string + +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// NodeConfig is the fully specified config starting an OpenShift node +type NodeConfig struct { + metav1.TypeMeta `json:",inline"` + + // NodeName is the value used to identify this particular node in the cluster. If possible, this should be your fully qualified hostname. + // If you're describing a set of static nodes to the master, this value must match one of the values in the list + NodeName string `json:"nodeName"` + + // Node may have multiple IPs, specify the IP to use for pod traffic routing + // If not specified, network parse/lookup on the nodeName is performed and the first non-loopback address is used + NodeIP string `json:"nodeIP"` + + // ServingInfo describes how to start serving + ServingInfo ServingInfo `json:"servingInfo"` + + // MasterKubeConfig is a filename for the .kubeconfig file that describes how to connect this node to the master + MasterKubeConfig string `json:"masterKubeConfig"` + + // MasterClientConnectionOverrides provides overrides to the client connection used to connect to the master. 
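The StringSource marshaling earlier in this file accepts either a bare JSON string or a structured object. A self-contained sketch of the same pattern with locally declared stand-in types (illustrative; not the vendored implementation):

package main

import (
	"encoding/json"
	"fmt"
)

// spec mirrors the shape of a StringSourceSpec-style struct, locally declared.
type spec struct {
	Value   string `json:"value,omitempty"`
	File    string `json:"file,omitempty"`
	KeyFile string `json:"keyFile,omitempty"`
}

type flexString struct {
	spec
}

// UnmarshalJSON accepts "secret" and {"file": "/path"} alike.
func (s *flexString) UnmarshalJSON(b []byte) error {
	var plain string
	if err := json.Unmarshal(b, &plain); err == nil {
		s.Value = plain
		return nil
	}
	return json.Unmarshal(b, &s.spec)
}

func main() {
	var a, b flexString
	_ = json.Unmarshal([]byte(`"hunter2"`), &a)
	_ = json.Unmarshal([]byte(`{"file":"/etc/secret"}`), &b)
	fmt.Println(a.Value, b.File) // hunter2 /etc/secret
}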
+ MasterClientConnectionOverrides *ClientConnectionOverrides `json:"masterClientConnectionOverrides"` + + // DNSDomain holds the domain suffix that will be used for the DNS search path inside each container. Defaults to + // 'cluster.local'. + DNSDomain string `json:"dnsDomain"` + + // DNSIP is the IP address that pods will use to access cluster DNS. Defaults to the service IP of the Kubernetes + // master. This IP must be listening on port 53 for compatibility with libc resolvers (which cannot be configured + // to resolve names from any other port). When running more complex local DNS configurations, this is often set + // to the local address of a DNS proxy like dnsmasq, which then will consult either the local DNS (see + // dnsBindAddress) or the master DNS. + DNSIP string `json:"dnsIP"` + + // DNSBindAddress is the ip:port to serve DNS on. If this is not set, the DNS server will not be started. + // Because most DNS resolvers will only listen on port 53, if you select an alternative port you will need + // a DNS proxy like dnsmasq to answer queries for containers. A common configuration is dnsmasq configured + // on a node IP listening on 53 and delegating queries for dnsDomain to this process, while sending other + // queries to the host environments nameservers. + DNSBindAddress string `json:"dnsBindAddress"` + + // DNSNameservers is a list of ip:port values of recursive nameservers to forward queries to when running + // a local DNS server if dnsBindAddress is set. If this value is empty, the DNS server will default to + // the nameservers listed in /etc/resolv.conf. If you have configured dnsmasq or another DNS proxy on the + // system, this value should be set to the upstream nameservers dnsmasq resolves with. + DNSNameservers []string `json:"dnsNameservers"` + + // DNSRecursiveResolvConf is a path to a resolv.conf file that contains settings for an upstream server. + // Only the nameservers and port fields are used. The file must exist and parse correctly. It adds extra + // nameservers to DNSNameservers if set. + DNSRecursiveResolvConf string `json:"dnsRecursiveResolvConf"` + + // Deprecated and maintained for backward compatibility, use NetworkConfig.NetworkPluginName instead + DeprecatedNetworkPluginName string `json:"networkPluginName,omitempty"` + + // NetworkConfig provides network options for the node + NetworkConfig NodeNetworkConfig `json:"networkConfig"` + + // VolumeDirectory is the directory that volumes will be stored under + VolumeDirectory string `json:"volumeDirectory"` + + // ImageConfig holds options that describe how to build image names for system components + ImageConfig ImageConfig `json:"imageConfig"` + + // AllowDisabledDocker if true, the Kubelet will ignore errors from Docker. This means that a node can start on a machine that doesn't have docker started. + AllowDisabledDocker bool `json:"allowDisabledDocker"` + + // PodManifestConfig holds the configuration for enabling the Kubelet to + // create pods based from a manifest file(s) placed locally on the node + PodManifestConfig *PodManifestConfig `json:"podManifestConfig"` + + // AuthConfig holds authn/authz configuration options + AuthConfig NodeAuthConfig `json:"authConfig"` + + // DockerConfig holds Docker related configuration options. + DockerConfig DockerConfig `json:"dockerConfig"` + + // KubeletArguments are key value pairs that will be passed directly to the Kubelet that match the Kubelet's + // command line arguments. 
These are not migrated or validated, so if you use them they may become invalid. + // These values override other settings in NodeConfig which may cause invalid configurations. + KubeletArguments ExtendedArguments `json:"kubeletArguments,omitempty"` + + // ProxyArguments are key value pairs that will be passed directly to the Proxy that match the Proxy's + // command line arguments. These are not migrated or validated, so if you use them they may become invalid. + // These values override other settings in NodeConfig which may cause invalid configurations. + ProxyArguments ExtendedArguments `json:"proxyArguments,omitempty"` + + // IPTablesSyncPeriod is how often iptable rules are refreshed + IPTablesSyncPeriod string `json:"iptablesSyncPeriod"` + + // EnableUnidling controls whether or not the hybrid unidling proxy will be set up + EnableUnidling *bool `json:"enableUnidling"` + + // VolumeConfig contains options for configuring volumes on the node. + VolumeConfig NodeVolumeConfig `json:"volumeConfig"` +} + +// NodeVolumeConfig contains options for configuring volumes on the node. +type NodeVolumeConfig struct { + // LocalQuota contains options for controlling local volume quota on the node. + LocalQuota LocalQuota `json:"localQuota"` +} + +// MasterVolumeConfig contains options for configuring volume plugins in the master node. +type MasterVolumeConfig struct { + // DynamicProvisioningEnabled is a boolean that toggles dynamic provisioning off when false, defaults to true + DynamicProvisioningEnabled *bool `json:"dynamicProvisioningEnabled"` +} + +// LocalQuota contains options for controlling local volume quota on the node. +type LocalQuota struct { + // FSGroup can be specified to enable a quota on local storage use per unique FSGroup ID. + // At present this is only implemented for emptyDir volumes, and if the underlying + // volumeDirectory is on an XFS filesystem. + PerFSGroup *resource.Quantity `json:"perFSGroup"` +} + +// NodeAuthConfig holds authn/authz configuration options +type NodeAuthConfig struct { + // AuthenticationCacheTTL indicates how long an authentication result should be cached. + // It takes a valid time duration string (e.g. "5m"). If empty, you get the default timeout. If zero (e.g. "0m"), caching is disabled + AuthenticationCacheTTL string `json:"authenticationCacheTTL"` + + // AuthenticationCacheSize indicates how many authentication results should be cached. If 0, the default cache size is used. + AuthenticationCacheSize int `json:"authenticationCacheSize"` + + // AuthorizationCacheTTL indicates how long an authorization result should be cached. + // It takes a valid time duration string (e.g. "5m"). If empty, you get the default timeout. If zero (e.g. "0m"), caching is disabled + AuthorizationCacheTTL string `json:"authorizationCacheTTL"` + + // AuthorizationCacheSize indicates how many authorization results should be cached. If 0, the default cache size is used. + AuthorizationCacheSize int `json:"authorizationCacheSize"` +} + +// NodeNetworkConfig provides network options for the node +type NodeNetworkConfig struct { + // NetworkPluginName is a string specifying the networking plugin + NetworkPluginName string `json:"networkPluginName"` + // Maximum transmission unit for the network packets + MTU uint32 `json:"mtu"` +} + +// DockerConfig holds Docker related configuration options. +type DockerConfig struct { + // ExecHandlerName is the name of the handler to use for executing + // commands in containers. 
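Several TTL fields above distinguish an empty string (use the default) from an explicit zero duration (disable caching). A small sketch of that three-way interpretation using time.ParseDuration; effectiveTTL and the two-minute default are illustrative, not taken from this repository:

package main

import (
	"fmt"
	"time"
)

// effectiveTTL interprets a TTL string the way these configs describe:
// empty means "use the default"; "0m" (or any zero duration) disables caching.
func effectiveTTL(s string, def time.Duration) (ttl time.Duration, disabled bool, err error) {
	if s == "" {
		return def, false, nil
	}
	d, err := time.ParseDuration(s)
	if err != nil {
		return 0, false, err
	}
	return d, d == 0, nil
}

func main() {
	for _, s := range []string{"", "5m", "0m"} {
		ttl, off, _ := effectiveTTL(s, 2*time.Minute)
		fmt.Printf("%q -> ttl=%v disabled=%v\n", s, ttl, off)
	}
}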
+ ExecHandlerName DockerExecHandlerType `json:"execHandlerName"` + // DockerShimSocket is the location of the dockershim socket the kubelet uses. + // Currently unix socket is supported on Linux, and tcp is supported on windows. + // Examples:'unix:///var/run/dockershim.sock', 'tcp://localhost:3735' + DockerShimSocket string `json:"dockerShimSocket"` + // DockershimRootDirectory is the dockershim root directory. + DockershimRootDirectory string `json:"dockerShimRootDirectory"` +} + +type DockerExecHandlerType string + +const ( + // DockerExecHandlerNative uses Docker's exec API for executing commands in containers. + DockerExecHandlerNative DockerExecHandlerType = "native" + // DockerExecHandlerNsenter uses nsenter for executing commands in containers. + DockerExecHandlerNsenter DockerExecHandlerType = "nsenter" + + // ControllersDisabled indicates no controllers should be enabled. + ControllersDisabled = "none" + // ControllersAll indicates all controllers should be started. + ControllersAll = "*" +) + +// FeatureList contains a set of features +type FeatureList []string + +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// MasterConfig holds the necessary configuration options for the OpenShift master +type MasterConfig struct { + metav1.TypeMeta `json:",inline"` + + // ServingInfo describes how to start serving + ServingInfo HTTPServingInfo `json:"servingInfo"` + + // AuthConfig configures authentication options in addition to the standard + // oauth token and client certificate authenticators + AuthConfig MasterAuthConfig `json:"authConfig"` + + // AggregatorConfig has options for configuring the aggregator component of the API server. + AggregatorConfig AggregatorConfig `json:"aggregatorConfig"` + + // CORSAllowedOrigins + CORSAllowedOrigins []string `json:"corsAllowedOrigins"` + + // APILevels is a list of API levels that should be enabled on startup: v1 as examples + APILevels []string `json:"apiLevels"` + + // MasterPublicURL is how clients can access the OpenShift API server + MasterPublicURL string `json:"masterPublicURL"` + + // Controllers is a list of the controllers that should be started. If set to "none", no controllers + // will start automatically. The default value is "*" which will start all controllers. When + // using "*", you may exclude controllers by prepending a "-" in front of their name. No other + // values are recognized at this time. + Controllers string `json:"controllers"` + + // AdmissionConfig contains admission control plugin configuration. + AdmissionConfig AdmissionConfig `json:"admissionConfig"` + + // ControllerConfig holds configuration values for controllers + ControllerConfig ControllerConfig `json:"controllerConfig"` + + // EtcdStorageConfig contains information about how API resources are + // stored in Etcd. These values are only relevant when etcd is the + // backing store for the cluster. 
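The Controllers field above recognizes "none", "*", and "-name" exclusions alongside "*". A hedged sketch of one way to evaluate that convention — controllerEnabled is a hypothetical helper, not this repository's implementation:

package main

import "fmt"

// controllerEnabled interprets the documented convention: "none" starts
// nothing, "*" starts everything, and "*" may be combined with "-name"
// exclusions. No other values are recognized.
func controllerEnabled(setting string, exclusions []string, name string) bool {
	if setting == "none" {
		return false
	}
	if setting != "*" {
		return false
	}
	for _, ex := range exclusions {
		if ex == "-"+name {
			return false
		}
	}
	return true
}

func main() {
	fmt.Println(controllerEnabled("*", []string{"-daemonset"}, "daemonset")) // false
	fmt.Println(controllerEnabled("*", nil, "deployment"))                   // true
}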
+ EtcdStorageConfig EtcdStorageConfig `json:"etcdStorageConfig"` + + // EtcdClientInfo contains information about how to connect to etcd + EtcdClientInfo EtcdConnectionInfo `json:"etcdClientInfo"` + // KubeletClientInfo contains information about how to connect to kubelets + KubeletClientInfo KubeletConnectionInfo `json:"kubeletClientInfo"` + + // KubernetesMasterConfig, if present, starts the kubernetes master in this process + KubernetesMasterConfig KubernetesMasterConfig `json:"kubernetesMasterConfig"` + // EtcdConfig, if present, starts etcd in this process + EtcdConfig *EtcdConfig `json:"etcdConfig"` + // OAuthConfig, if present, starts the /oauth endpoint in this process + OAuthConfig *OAuthConfig `json:"oauthConfig"` + + // DNSConfig, if present, starts the DNS server in this process + DNSConfig *DNSConfig `json:"dnsConfig"` + + // ServiceAccountConfig holds options related to service accounts + ServiceAccountConfig ServiceAccountConfig `json:"serviceAccountConfig"` + + // MasterClients holds all the client connection information for controllers and other system components + MasterClients MasterClients `json:"masterClients"` + + // ImageConfig holds options that describe how to build image names for system components + ImageConfig ImageConfig `json:"imageConfig"` + + // ImagePolicyConfig controls limits and behavior for importing images + ImagePolicyConfig ImagePolicyConfig `json:"imagePolicyConfig"` + + // PolicyConfig holds information about where to locate critical pieces of bootstrapping policy + PolicyConfig PolicyConfig `json:"policyConfig"` + + // ProjectConfig holds information about project creation and defaults + ProjectConfig ProjectConfig `json:"projectConfig"` + + // RoutingConfig holds information about routing and route generation + RoutingConfig RoutingConfig `json:"routingConfig"` + + // NetworkConfig to be passed to the compiled in network plugin + NetworkConfig MasterNetworkConfig `json:"networkConfig"` + + // MasterVolumeConfig contains options for configuring volume plugins in the master node. + VolumeConfig MasterVolumeConfig `json:"volumeConfig"` + + // JenkinsPipelineConfig holds information about the default Jenkins template + // used for JenkinsPipeline build strategy. + JenkinsPipelineConfig JenkinsPipelineConfig `json:"jenkinsPipelineConfig"` + + // AuditConfig holds information related to auditing capabilities. + AuditConfig AuditConfig `json:"auditConfig"` + + // DisableOpenAPI avoids starting the openapi endpoint because it is very expensive. + // This option will be removed at a later time. It is never serialized. + DisableOpenAPI bool `json:"-"` +} + +// MasterAuthConfig configures authentication options in addition to the standard +// oauth token and client certificate authenticators +type MasterAuthConfig struct { + // RequestHeader holds options for setting up a front proxy against the API. It is optional. + RequestHeader *RequestHeaderAuthenticationOptions `json:"requestHeader"` + // WebhookTokenAuthenticators, if present, configures remote token reviewers + WebhookTokenAuthenticators []WebhookTokenAuthenticator `json:"webhookTokenAuthenticators"` + // OAuthMetadataFile is a path to a file containing the discovery endpoint for OAuth 2.0 Authorization + // Server Metadata for an external OAuth server.
// See IETF Draft: // https://tools.ietf.org/html/draft-ietf-oauth-discovery-04#section-2 + // This option is mutually exclusive with OAuthConfig + OAuthMetadataFile string `json:"oauthMetadataFile"` +} + +// RequestHeaderAuthenticationOptions provides options for setting up a front proxy against the entire +// API instead of against the /oauth endpoint. +type RequestHeaderAuthenticationOptions struct { + // ClientCA is a file with the trusted signer certs. It is required. + ClientCA string `json:"clientCA"` + // ClientCommonNames is a required list of common names to require a match from. + ClientCommonNames []string `json:"clientCommonNames"` + + // UsernameHeaders is the list of headers to check for user information. First hit wins. + UsernameHeaders []string `json:"usernameHeaders"` + // GroupHeaders is the set of headers to check for group information. All are unioned. + GroupHeaders []string `json:"groupHeaders"` + // ExtraHeaderPrefixes is the set of request header prefixes to inspect for user extra. X-Remote-Extra- is suggested. + ExtraHeaderPrefixes []string `json:"extraHeaderPrefixes"` +} + +// AggregatorConfig holds information required to make the aggregator function. +type AggregatorConfig struct { + // ProxyClientInfo specifies the client cert/key to use when proxying to aggregated API servers + ProxyClientInfo CertInfo `json:"proxyClientInfo"` +} + +type LogFormatType string + +type WebHookModeType string + +const ( + // LogFormatLegacy saves events in 1-line text format. + LogFormatLegacy LogFormatType = "legacy" + // LogFormatJson saves events in structured json format. + LogFormatJson LogFormatType = "json" + + // WebHookModeBatch indicates that the webhook should buffer audit events + // internally, sending batch updates either once a certain number of + // events have been received or a certain amount of time has passed. + WebHookModeBatch WebHookModeType = "batch" + // WebHookModeBlocking causes the webhook to block on every attempt to process + // a set of events. This causes requests to the API server to wait for a + // round trip to the external audit service before sending a response. + WebHookModeBlocking WebHookModeType = "blocking" +) + +// AuditConfig holds configuration for the audit capabilities +type AuditConfig struct { + // If this flag is set, the audit log will be printed in the logs. + // The log contains the method, user, and requested URL. + Enabled bool `json:"enabled"` + // All requests coming to the apiserver will be logged to this file. + AuditFilePath string `json:"auditFilePath"` + // Maximum number of days to retain old log files based on the timestamp encoded in their filename. + MaximumFileRetentionDays int `json:"maximumFileRetentionDays"` + // Maximum number of old log files to retain. + MaximumRetainedFiles int `json:"maximumRetainedFiles"` + // Maximum size in megabytes of the log file before it gets rotated. Defaults to 100MB. + MaximumFileSizeMegabytes int `json:"maximumFileSizeMegabytes"` + + // PolicyFile is a path to the file that defines the audit policy configuration. + PolicyFile string `json:"policyFile"` + // PolicyConfiguration is an embedded policy configuration object to be used + // as the audit policy configuration. If present, it will be used instead of + // the path to the policy file. + PolicyConfiguration runtime.RawExtension `json:"policyConfiguration"` + + // Format of saved audits (legacy or json).
+ LogFormat LogFormatType `json:"logFormat"` + + // Path to a .kubeconfig formatted file that defines the audit webhook configuration. + WebHookKubeConfig string `json:"webHookKubeConfig"` + // Strategy for sending audit events (block or batch). + WebHookMode WebHookModeType `json:"webHookMode"` +} + +// JenkinsPipelineConfig holds configuration for the Jenkins pipeline strategy +type JenkinsPipelineConfig struct { + // AutoProvisionEnabled determines whether a Jenkins server will be spawned from the provided + // template when the first build config in the project with type JenkinsPipeline + // is created. When not specified this option defaults to true. + AutoProvisionEnabled *bool `json:"autoProvisionEnabled"` + // TemplateNamespace contains the namespace name where the Jenkins template is stored + TemplateNamespace string `json:"templateNamespace"` + // TemplateName is the name of the default Jenkins template + TemplateName string `json:"templateName"` + // ServiceName is the name of the Jenkins service OpenShift uses to detect + // whether a Jenkins pipeline handler has already been installed in a project. + // This value *must* match a service name in the provided template. + ServiceName string `json:"serviceName"` + // Parameters specifies a set of optional parameters to the Jenkins template. + Parameters map[string]string `json:"parameters"` +} + +// ImagePolicyConfig holds the necessary configuration options for limits and behavior for importing images +type ImagePolicyConfig struct { + // MaxImagesBulkImportedPerRepository controls the number of images that are imported when a user + // does a bulk import of a container repository. This number defaults to 50 to prevent users from + // importing large numbers of images accidentally. Set -1 for no limit. + MaxImagesBulkImportedPerRepository int `json:"maxImagesBulkImportedPerRepository"` + // DisableScheduledImport allows scheduled background import of images to be disabled. + DisableScheduledImport bool `json:"disableScheduledImport"` + // ScheduledImageImportMinimumIntervalSeconds is the minimum number of seconds that can elapse between when image streams + // scheduled for background import are checked against the upstream repository. The default value is 15 minutes. + ScheduledImageImportMinimumIntervalSeconds int `json:"scheduledImageImportMinimumIntervalSeconds"` + // MaxScheduledImageImportsPerMinute is the maximum number of scheduled image streams that will be imported in the + // background per minute. The default value is 60. Set to -1 for unlimited. + MaxScheduledImageImportsPerMinute int `json:"maxScheduledImageImportsPerMinute"` + // AllowedRegistriesForImport limits the container image registries that normal users may import + // images from. Set this list to the registries that you trust to contain valid Docker + // images and that you want applications to be able to import from. Users with + // permission to create Images or ImageStreamMappings via the API are not affected by + // this policy - typically only administrators or system integrations will have those + // permissions. + AllowedRegistriesForImport *AllowedRegistries `json:"allowedRegistriesForImport,omitempty"` + // InternalRegistryHostname sets the hostname for the default internal image + // registry. The value must be in "hostname[:port]" format. + // For backward compatibility, users can still use OPENSHIFT_DEFAULT_REGISTRY + // environment variable but this setting overrides the environment variable. 
+ InternalRegistryHostname string `json:"internalRegistryHostname,omitempty"` + // ExternalRegistryHostname sets the hostname for the default external image + // registry. The external hostname should be set only when the image registry + // is exposed externally. The value is used in 'publicDockerImageRepository' + // field in ImageStreams. The value must be in "hostname[:port]" format. + ExternalRegistryHostname string `json:"externalRegistryHostname,omitempty"` + // AdditionalTrustedCA is a path to a pem bundle file containing additional CAs that + // should be trusted during imagestream import. + AdditionalTrustedCA string `json:"additionalTrustedCA,omitempty"` +} + +// AllowedRegistries represents a list of registries allowed for the image import. +type AllowedRegistries []RegistryLocation + +// RegistryLocation contains a location of the registry specified by the registry domain +// name. The domain name might include wildcards, like '*' or '??'. +type RegistryLocation struct { + // DomainName specifies a domain name for the registry. + // In case the registry uses a non-standard (i.e. not 80 or 443) port, the port should be included + // in the domain name as well. + DomainName string `json:"domainName"` + // Insecure indicates whether the registry is secure (https) or insecure (http). + // By default (if not specified) the registry is assumed to be secure. + Insecure bool `json:"insecure,omitempty"` +} + +// ProjectConfig holds the necessary configuration options for project creation and defaults +type ProjectConfig struct { + // DefaultNodeSelector holds default project node label selector + DefaultNodeSelector string `json:"defaultNodeSelector"` + + // ProjectRequestMessage is the string presented to a user if they are unable to request a project via the projectrequest api endpoint + ProjectRequestMessage string `json:"projectRequestMessage"` + + // ProjectRequestTemplate is the template to use for creating projects in response to projectrequest. + // It is in the format namespace/template and it is optional. + // If it is not specified, a default template is used. + ProjectRequestTemplate string `json:"projectRequestTemplate"` + + // SecurityAllocator controls the automatic allocation of UIDs and MCS labels to a project. If nil, allocation is disabled. + SecurityAllocator *SecurityAllocator `json:"securityAllocator"` +} + +// SecurityAllocator controls the automatic allocation of UIDs and MCS labels to a project. If nil, allocation is disabled. +type SecurityAllocator struct { + // UIDAllocatorRange defines the total set of Unix user IDs (UIDs) that will be allocated to projects automatically, and the size of the + // block each namespace gets. For example, 1000-1999/10 will allocate ten UIDs per namespace, and will be able to allocate up to 100 blocks + // before running out of space. The default is to allocate from 1 billion to 2 billion in 10k blocks (which is the expected size of the + // ranges container images will use once user namespaces are started). + UIDAllocatorRange string `json:"uidAllocatorRange"` + // MCSAllocatorRange defines the range of MCS categories that will be assigned to namespaces. The format is + // "<prefix>/<numberOfLabels>[,<maxCategory>]". The default is "s0/2" and will allocate from c0 -> c1023, which means a total of 535k labels + // are available (1024 choose 2 ~ 535k). If this value is changed after startup, new projects may receive labels that are already allocated + // to other projects.
Prefix may be any valid SELinux set of terms (including user, role, and type), although leaving them as the default + // will allow the server to set them automatically. + // + // Examples: + // * s0:/2 - Allocate labels from s0:c0,c0 to s0:c511,c511 + // * s0:/2,512 - Allocate labels from s0:c0,c0,c0 to s0:c511,c511,511 + // + MCSAllocatorRange string `json:"mcsAllocatorRange"` + // MCSLabelsPerProject defines the number of labels that should be reserved per project. The default is 5 to match the default UID and MCS + // ranges (100k namespaces, 535k/5 labels). + MCSLabelsPerProject int `json:"mcsLabelsPerProject"` +} + +// PolicyConfig holds the necessary configuration options for API policy enforcement +type PolicyConfig struct { + // UserAgentMatchingConfig controls how API calls from *voluntarily* identifying clients will be handled. THIS DOES NOT DEFEND AGAINST MALICIOUS CLIENTS! + UserAgentMatchingConfig UserAgentMatchingConfig `json:"userAgentMatchingConfig"` +} + +// UserAgentMatchingConfig controls how API calls from *voluntarily* identifying clients will be handled. THIS DOES NOT DEFEND AGAINST MALICIOUS CLIENTS! +type UserAgentMatchingConfig struct { + // If this list is non-empty, then a User-Agent must match one of the UserAgentRegexes to be allowed + RequiredClients []UserAgentMatchRule `json:"requiredClients"` + + // If this list is non-empty, then a User-Agent must not match any of the UserAgentRegexes + DeniedClients []UserAgentDenyRule `json:"deniedClients"` + + // DefaultRejectionMessage is the message shown when rejecting a client. If it is not set, a generic message is given. + DefaultRejectionMessage string `json:"defaultRejectionMessage"` +} + +// UserAgentMatchRule describes how to match a given request based on User-Agent and HTTPVerb +type UserAgentMatchRule struct { + // Regex is a regex that is checked against the User-Agent. + // Known variants of oc clients + // 1. oc accessing kube resources: oc/v1.2.0 (linux/amd64) kubernetes/bc4550d + // 2. oc accessing openshift resources: oc/v1.1.3 (linux/amd64) openshift/b348c2f + // 3. openshift kubectl accessing kube resources: openshift/v1.2.0 (linux/amd64) kubernetes/bc4550d + // 4. openshift kubectl accessing openshift resources: openshift/v1.1.3 (linux/amd64) openshift/b348c2f + // 5. oadm accessing kube resources: oadm/v1.2.0 (linux/amd64) kubernetes/bc4550d + // 6. oadm accessing openshift resources: oadm/v1.1.3 (linux/amd64) openshift/b348c2f + // 7. openshift cli accessing kube resources: openshift/v1.2.0 (linux/amd64) kubernetes/bc4550d + // 8. openshift cli accessing openshift resources: openshift/v1.1.3 (linux/amd64) openshift/b348c2f + Regex string `json:"regex"` + + // HTTPVerbs specifies which HTTP verbs should be matched. An empty list means "match all verbs". + HTTPVerbs []string `json:"httpVerbs"` +} + +// UserAgentDenyRule adds a rejection message that can be used to help a user figure out how to get an approved client +type UserAgentDenyRule struct { + UserAgentMatchRule `json:",inline"` + + // RejectionMessage is the message shown when rejecting a client. If it is not set, the default message is used. + RejectionMessage string `json:"rejectionMessage"` +} + +// RoutingConfig holds the necessary configuration options for routing to subdomains +type RoutingConfig struct { + // Subdomain is the suffix appended to $service.$namespace. to form the default route hostname + // DEPRECATED: This field is being replaced by routers setting their own defaults. This is the + // "default" route.
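The uidAllocatorRange format described above ("<start>-<end>/<blockSize>") fixes the number of allocatable blocks up front. A quick illustrative parse that reproduces the 1000-1999/10 arithmetic from the comment:

package main

import "fmt"

func main() {
	// "1000-1999/10": 1000 UIDs split into blocks of 10 -> 100 blocks,
	// i.e. up to 100 namespaces before the range is exhausted.
	var start, end, block int
	fmt.Sscanf("1000-1999/10", "%d-%d/%d", &start, &end, &block)
	total := end - start + 1
	fmt.Printf("%d UIDs, %d per namespace, %d blocks\n", total, block, total/block)
}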
+ Subdomain string `json:"subdomain"` +} + +// MasterNetworkConfig to be passed to the compiled in network plugin +type MasterNetworkConfig struct { + // NetworkPluginName is the name of the network plugin to use + NetworkPluginName string `json:"networkPluginName"` + // ClusterNetworkCIDR is the CIDR string to specify the global overlay network's L3 space. Deprecated, but maintained for backwards compatibility, use ClusterNetworks instead. + DeprecatedClusterNetworkCIDR string `json:"clusterNetworkCIDR,omitempty"` + // ClusterNetworks is a list of ClusterNetwork objects that defines the global overlay network's L3 space by specifying a set of CIDRs and netmasks that the SDN can allocate addresses from. If this is specified, then ClusterNetworkCIDR and HostSubnetLength may not be set. + ClusterNetworks []ClusterNetworkEntry `json:"clusterNetworks"` + // HostSubnetLength is the number of bits to allocate to each host's subnet, e.g. 8 would mean a /24 network on the host. Deprecated, but maintained for backwards compatibility, use ClusterNetworks instead. + DeprecatedHostSubnetLength uint32 `json:"hostSubnetLength,omitempty"` + // ServiceNetworkCIDR is the CIDR string to specify the service networks + ServiceNetworkCIDR string `json:"serviceNetworkCIDR"` + // ExternalIPNetworkCIDRs controls what values are acceptable for the service external IP field. If empty, no externalIP + // may be set. It may contain a list of CIDRs which are checked for access. If a CIDR is prefixed with !, IPs in that + // CIDR will be rejected. Rejections will be applied first, then the IP checked against one of the allowed CIDRs. You + // should ensure this range does not overlap with your nodes, pods, or service CIDRs for security reasons. + ExternalIPNetworkCIDRs []string `json:"externalIPNetworkCIDRs"` + // IngressIPNetworkCIDR controls the range to assign ingress IPs from for services of type LoadBalancer on bare + // metal. If empty, ingress IPs will not be assigned. It may contain a single CIDR that will be allocated from. + // For security reasons, you should ensure that this range does not overlap with the CIDRs reserved for external IPs, + // nodes, pods, or services. + IngressIPNetworkCIDR string `json:"ingressIPNetworkCIDR"` + // VXLANPort is the VXLAN port used by the cluster. If it is not set, 4789 is the default value + VXLANPort uint32 `json:"vxlanPort,omitempty"` +} + +// ClusterNetworkEntry defines an individual cluster network. The CIDRs cannot overlap with other cluster network CIDRs, CIDRs reserved for external IPs, CIDRs reserved for service networks, and CIDRs reserved for ingress IPs. +type ClusterNetworkEntry struct { + // CIDR defines the total range of a cluster network's address space. + CIDR string `json:"cidr"` + // HostSubnetLength is the number of bits of the accompanying CIDR address to allocate to each node, e.g. 8 would mean that each node would have a /24 slice of the overlay network for its pods.
+ +// ImageConfig holds the necessary configuration options for building image names for system components +type ImageConfig struct { + // Format is the format of the name to be built for the system component + Format string `json:"format"` + // Latest determines if the latest tag will be pulled from the registry + Latest bool `json:"latest"` +} + +// RemoteConnectionInfo holds information necessary for establishing a remote connection +type RemoteConnectionInfo struct { + // URL is the remote URL to connect to + URL string `json:"url"` + // CA is the CA for verifying TLS connections + CA string `json:"ca"` + // CertInfo is the TLS client cert information to present + // this is anonymous so that we can inline it for serialization + CertInfo `json:",inline"` +} + +// KubeletConnectionInfo holds information necessary for connecting to a kubelet +type KubeletConnectionInfo struct { + // Port is the port to connect to kubelets on + Port uint `json:"port"` + // CA is the CA for verifying TLS connections to kubelets + CA string `json:"ca"` + // CertInfo is the TLS client cert information for securing communication to kubelets + // this is anonymous so that we can inline it for serialization + CertInfo `json:",inline"` +} + +// EtcdConnectionInfo holds information necessary for connecting to an etcd server +type EtcdConnectionInfo struct { + // URLs are the URLs for etcd + URLs []string `json:"urls"` + // CA is a file containing trusted roots for the etcd server certificates + CA string `json:"ca"` + // CertInfo is the TLS client cert information for securing communication to etcd + // this is anonymous so that we can inline it for serialization + CertInfo `json:",inline"` +} + +// EtcdStorageConfig holds the necessary configuration options for the etcd storage underlying OpenShift and Kubernetes +type EtcdStorageConfig struct { + // KubernetesStorageVersion is the API version that Kube resources in etcd should be + // serialized to. This value should *not* be advanced until all clients in the + // cluster that read from etcd have code that allows them to read the new version. + KubernetesStorageVersion string `json:"kubernetesStorageVersion"` + // KubernetesStoragePrefix is the path within etcd that the Kubernetes resources will + // be rooted under. This value, if changed, will mean existing objects in etcd will + // no longer be located. The default value is 'kubernetes.io'. + KubernetesStoragePrefix string `json:"kubernetesStoragePrefix"` + // OpenShiftStorageVersion is the API version that OpenShift resources in etcd should be + // serialized to. This value should *not* be advanced until all clients in the + // cluster that read from etcd have code that allows them to read the new version. + OpenShiftStorageVersion string `json:"openShiftStorageVersion"` + // OpenShiftStoragePrefix is the path within etcd that the OpenShift resources will + // be rooted under. This value, if changed, will mean existing objects in etcd will + // no longer be located. The default value is 'openshift.io'. + OpenShiftStoragePrefix string `json:"openShiftStoragePrefix"` +} + +// ServingInfo holds information about serving web pages +type ServingInfo struct { + // BindAddress is the ip:port to serve on + BindAddress string `json:"bindAddress"` + // BindNetwork is the type of network to bind to - defaults to "tcp4", accepts "tcp", + // "tcp4", and "tcp6" + BindNetwork string `json:"bindNetwork"` + // CertInfo is the TLS cert info for serving secure traffic.
+ // this is anonymous so that we can inline it for serialization + CertInfo `json:",inline"` + // ClientCA is the certificate bundle for all the signers that you'll recognize for incoming client certificates + ClientCA string `json:"clientCA"` + // NamedCertificates is a list of certificates to use to secure requests to specific hostnames + NamedCertificates []NamedCertificate `json:"namedCertificates"` + // MinTLSVersion is the minimum TLS version supported. + // Values must match version names from https://golang.org/pkg/crypto/tls/#pkg-constants + MinTLSVersion string `json:"minTLSVersion,omitempty"` + // CipherSuites contains an overridden list of ciphers for the server to support. + // Values must match cipher suite IDs from https://golang.org/pkg/crypto/tls/#pkg-constants + CipherSuites []string `json:"cipherSuites,omitempty"` +} + +// NamedCertificate specifies a certificate/key, and the names it should be served for +type NamedCertificate struct { + // Names is a list of DNS names this certificate should be used to secure + // A name can be a normal DNS name, or can contain leading wildcard segments. + Names []string `json:"names"` + // CertInfo is the TLS cert info for serving secure traffic + CertInfo `json:",inline"` +} + +// HTTPServingInfo holds configuration for serving HTTP +type HTTPServingInfo struct { + // ServingInfo is the HTTP serving information + ServingInfo `json:",inline"` + // MaxRequestsInFlight is the number of concurrent requests allowed to the server. If zero, no limit. + MaxRequestsInFlight int `json:"maxRequestsInFlight"` + // RequestTimeoutSeconds is the number of seconds before requests are timed out. The default is 60 minutes; if + // -1, there is no limit on requests. + RequestTimeoutSeconds int `json:"requestTimeoutSeconds"` +} + +// MasterClients holds references to `.kubeconfig` files that qualify master clients for OpenShift and Kubernetes +type MasterClients struct { + // OpenShiftLoopbackKubeConfig is a .kubeconfig filename for system components to loopback to this master + OpenShiftLoopbackKubeConfig string `json:"openshiftLoopbackKubeConfig"` + + // OpenShiftLoopbackClientConnectionOverrides specifies client overrides for system components to loop back to this master. + OpenShiftLoopbackClientConnectionOverrides *ClientConnectionOverrides `json:"openshiftLoopbackClientConnectionOverrides"` +} + +// ClientConnectionOverrides are a set of overrides to the default client connection settings. +type ClientConnectionOverrides struct { + // AcceptContentTypes defines the Accept header sent by clients when connecting to a server, overriding the + // default value of 'application/json'. This field will control all connections to the server used by a particular + // client. + AcceptContentTypes string `json:"acceptContentTypes"` + // ContentType is the content type used when sending data to the server from this client. + ContentType string `json:"contentType"` + + // QPS controls the number of queries per second allowed for this connection. + QPS float32 `json:"qps"` + // Burst allows extra queries to accumulate when a client is exceeding its rate. + Burst int32 `json:"burst"` +}
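The QPS/Burst pair describes a token bucket: QPS is the sustained refill rate and Burst is the bucket depth. As one illustration of these semantics (not necessarily how this particular configuration is consumed), client-go's flowcontrol package builds a limiter directly from the two values:

package main

import (
	"fmt"

	"k8s.io/client-go/util/flowcontrol"
)

func main() {
	// Hypothetical override values: a sustained 50 queries/sec, with
	// bursts of up to 100 requests when tokens have accumulated.
	limiter := flowcontrol.NewTokenBucketRateLimiter(50, 100)

	// TryAccept reports whether a request may proceed immediately;
	// Accept would instead block until a token becomes available.
	fmt.Println(limiter.TryAccept())
}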
+ +// DNSConfig holds the necessary configuration options for DNS +type DNSConfig struct { + // BindAddress is the ip:port to serve DNS on + BindAddress string `json:"bindAddress"` + // BindNetwork is the type of network to bind to - defaults to "tcp4", accepts "tcp", + // "tcp4", and "tcp6" + BindNetwork string `json:"bindNetwork"` + // AllowRecursiveQueries allows the DNS server on the master to answer queries recursively. Note that open + // resolvers can be used for DNS amplification attacks and the master DNS should not be made accessible + // to public networks. + AllowRecursiveQueries bool `json:"allowRecursiveQueries"` +} + +// WebhookTokenAuthenticator holds the necessary configuration options for an +// external token authenticator +type WebhookTokenAuthenticator struct { + // ConfigFile is a path to a Kubeconfig file with the webhook configuration + ConfigFile string `json:"configFile"` + // CacheTTL indicates how long an authentication result should be cached. + // It takes a valid time duration string (e.g. "5m"). + // If empty, you get a default timeout of 2 minutes. + // If zero (e.g. "0m"), caching is disabled + // (a duration-parsing sketch follows the types below) + CacheTTL string `json:"cacheTTL"` +} + +// OAuthConfig holds the necessary configuration options for OAuth authentication +type OAuthConfig struct { + // MasterCA is the CA for verifying the TLS connection back to the MasterURL. + MasterCA *string `json:"masterCA"` + + // MasterURL is used for making server-to-server calls to exchange authorization codes for access tokens + MasterURL string `json:"masterURL"` + + // MasterPublicURL is used for building valid client redirect URLs for internal and external access + MasterPublicURL string `json:"masterPublicURL"` + + // AssetPublicURL is used for building valid client redirect URLs for external access + AssetPublicURL string `json:"assetPublicURL"` + + // AlwaysShowProviderSelection will force the provider selection page to render even when there is only a single provider. + AlwaysShowProviderSelection bool `json:"alwaysShowProviderSelection"` + + // IdentityProviders is an ordered list of ways for a user to identify themselves + IdentityProviders []IdentityProvider `json:"identityProviders"` + + // GrantConfig describes how to handle grants + GrantConfig GrantConfig `json:"grantConfig"` + + // SessionConfig holds information about configuring sessions. + SessionConfig *SessionConfig `json:"sessionConfig"` + + // TokenConfig contains options for authorization and access tokens + TokenConfig TokenConfig `json:"tokenConfig"` + + // Templates allow you to customize pages like the login page. + Templates *OAuthTemplates `json:"templates"` +} + +// OAuthTemplates allow for customization of pages like the login page +type OAuthTemplates struct { + // Login is a path to a file containing a go template used to render the login page. + // If unspecified, the default login page is used. + Login string `json:"login"` + + // ProviderSelection is a path to a file containing a go template used to render the provider selection page. + // If unspecified, the default provider selection page is used. + ProviderSelection string `json:"providerSelection"` + + // Error is a path to a file containing a go template used to render error pages during the authentication or grant flow + // If unspecified, the default error page is used. + Error string `json:"error"` +}
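Since the CacheTTL field above takes Go duration syntax, the standard library's time.ParseDuration illustrates how such strings resolve; "0m" parses to a zero duration, the documented way to disable caching:

package main

import (
	"fmt"
	"time"
)

func main() {
	// Sample TTL strings; time.ParseDuration accepts the "5m"/"90s"
	// style referenced in the CacheTTL documentation.
	for _, ttl := range []string{"5m", "0m", "90s"} {
		d, err := time.ParseDuration(ttl)
		if err != nil {
			panic(err)
		}
		fmt.Printf("%s -> %v (caching disabled: %t)\n", ttl, d, d == 0)
	}
}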
+ +// ServiceAccountConfig holds the necessary configuration options for a service account +type ServiceAccountConfig struct { + // ManagedNames is a list of service account names that will be auto-created in every namespace. + // If no names are specified, the ServiceAccountsController will not be started. + ManagedNames []string `json:"managedNames"` + + // LimitSecretReferences controls whether or not to allow a service account to reference any secret in a namespace + // without explicitly referencing them + LimitSecretReferences bool `json:"limitSecretReferences"` + + // PrivateKeyFile is a file containing a PEM-encoded private RSA key, used to sign service account tokens. + // If no private key is specified, the service account TokensController will not be started. + PrivateKeyFile string `json:"privateKeyFile"` + + // PublicKeyFiles is a list of files, each containing a PEM-encoded public RSA key. + // (If any file contains a private key, the public portion of the key is used) + // The list of public keys is used to verify presented service account tokens. + // Each key is tried in order until the list is exhausted or verification succeeds. + // If no keys are specified, no service account authentication will be available. + PublicKeyFiles []string `json:"publicKeyFiles"` + + // MasterCA is the CA for verifying the TLS connection back to the master. The service account controller will automatically + // inject the contents of this file into pods so they can verify connections to the master. + MasterCA string `json:"masterCA"` +} + +// TokenConfig holds the necessary configuration options for authorization and access tokens +type TokenConfig struct { + // AuthorizeTokenMaxAgeSeconds defines the maximum age of authorize tokens + AuthorizeTokenMaxAgeSeconds int32 `json:"authorizeTokenMaxAgeSeconds"` + // AccessTokenMaxAgeSeconds defines the maximum age of access tokens + AccessTokenMaxAgeSeconds int32 `json:"accessTokenMaxAgeSeconds"` + // AccessTokenInactivityTimeoutSeconds defines the default token + // inactivity timeout for tokens granted by any client. + // Setting it to nil means the feature is completely disabled (default) + // The default setting can be overridden on a per-OAuthClient basis. + // The value represents the maximum amount of time that can occur between + // consecutive uses of the token. Tokens become invalid if they are not + // used within this temporal window. The user will need to acquire a new + // token to regain access once a token times out. + // Valid values are: + // - 0: Tokens never time out + // - X: Tokens time out if there is no activity for X seconds + // The current minimum allowed value for X is 300 (5 minutes) + // (a sketch of this rule appears after the SessionConfig type below) + AccessTokenInactivityTimeoutSeconds *int32 `json:"accessTokenInactivityTimeoutSeconds,omitempty"` +} + +// SessionConfig specifies options for cookie-based sessions. Used by AuthRequestHandlerSession +type SessionConfig struct { + // SessionSecretsFile is a reference to a file containing a serialized SessionSecrets object + // If no file is specified, a random signing and encryption key are generated at each server start + SessionSecretsFile string `json:"sessionSecretsFile"` + // SessionMaxAgeSeconds specifies how long created sessions last. Used by AuthRequestHandlerSession + SessionMaxAgeSeconds int32 `json:"sessionMaxAgeSeconds"` + // SessionName is the cookie name used to store the session + SessionName string `json:"sessionName"` +}
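A sketch of the inactivity rule documented on AccessTokenInactivityTimeoutSeconds; the timedOut helper is hypothetical and only restates the documented semantics (nil disables the feature, 0 never times out, X invalidates after X idle seconds):

package main

import (
	"fmt"
	"time"
)

// timedOut reports whether a token has exceeded its inactivity window.
func timedOut(timeoutSeconds *int32, lastUsed, now time.Time) bool {
	if timeoutSeconds == nil || *timeoutSeconds == 0 {
		return false // feature disabled, or tokens never time out
	}
	return now.Sub(lastUsed) > time.Duration(*timeoutSeconds)*time.Second
}

func main() {
	timeout := int32(300) // the documented minimum: 5 minutes
	last := time.Now().Add(-10 * time.Minute)
	fmt.Println(timedOut(&timeout, last, time.Now())) // true: idle too long
	fmt.Println(timedOut(nil, last, time.Now()))      // false: disabled
}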
+ +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// SessionSecrets lists the secrets to use to sign/encrypt and authenticate/decrypt created sessions. +type SessionSecrets struct { + metav1.TypeMeta `json:",inline"` + + // Secrets is a list of secrets + // New sessions are signed and encrypted using the first secret. + // Existing sessions are decrypted/authenticated by each secret until one succeeds. This allows rotating secrets. + Secrets []SessionSecret `json:"secrets"` +} + +// SessionSecret is a secret used to authenticate/decrypt cookie-based sessions +type SessionSecret struct { + // Authentication is used to authenticate sessions using HMAC. Recommended to use a secret with 32 or 64 bytes. + Authentication string `json:"authentication"` + // Encryption is used to encrypt sessions. Must be 16, 24, or 32 characters long, to select AES-128, AES-192, or AES-256. + // (a key-length sketch follows the password identity provider types below) + Encryption string `json:"encryption"` +} + +// IdentityProvider provides identities for users authenticating using credentials +type IdentityProvider struct { + // Name is used to qualify the identities returned by this provider + Name string `json:"name"` + // UseAsChallenger indicates whether to issue WWW-Authenticate challenges for this provider + UseAsChallenger bool `json:"challenge"` + // UseAsLogin indicates whether to use this identity provider for unauthenticated browsers to log in against + UseAsLogin bool `json:"login"` + // MappingMethod determines how identities from this provider are mapped to users + MappingMethod string `json:"mappingMethod"` + // Provider contains the information about how to set up a specific identity provider + Provider runtime.RawExtension `json:"provider"` +} + +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// BasicAuthPasswordIdentityProvider provides identities for users authenticating using HTTP basic auth credentials +type BasicAuthPasswordIdentityProvider struct { + metav1.TypeMeta `json:",inline"` + + // RemoteConnectionInfo contains information about how to connect to the external basic auth server + RemoteConnectionInfo `json:",inline"` +} + +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// AllowAllPasswordIdentityProvider provides identities for users authenticating using non-empty passwords +type AllowAllPasswordIdentityProvider struct { + metav1.TypeMeta `json:",inline"` +} + +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// DenyAllPasswordIdentityProvider provides no identities for users +type DenyAllPasswordIdentityProvider struct { + metav1.TypeMeta `json:",inline"` +} + +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// HTPasswdPasswordIdentityProvider provides identities for users authenticating using htpasswd credentials +type HTPasswdPasswordIdentityProvider struct { + metav1.TypeMeta `json:",inline"` + + // File is a reference to your htpasswd file + File string `json:"file"` +}
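The key-length rule on SessionSecret.Encryption above falls out of how AES itself works: the standard library's crypto/aes selects the variant from the key size, and rejects anything else. A minimal demonstration:

package main

import (
	"crypto/aes"
	"fmt"
)

func main() {
	// crypto/aes picks the AES variant from the key length, which is why
	// the Encryption secret must be exactly 16, 24, or 32 bytes
	// (AES-128, AES-192, or AES-256 respectively).
	for _, key := range [][]byte{
		make([]byte, 16), // AES-128
		make([]byte, 24), // AES-192
		make([]byte, 32), // AES-256
		make([]byte, 20), // invalid: KeySizeError
	} {
		_, err := aes.NewCipher(key)
		fmt.Printf("%d-byte key: err=%v\n", len(key), err)
	}
}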
+ +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// LDAPPasswordIdentityProvider provides identities for users authenticating using LDAP credentials +type LDAPPasswordIdentityProvider struct { + metav1.TypeMeta `json:",inline"` + // URL is an RFC 2255 URL which specifies the LDAP search parameters to use. The syntax of the URL is + // ldap://host:port/basedn?attribute?scope?filter + URL string `json:"url"` + // BindDN is an optional DN to bind with during the search phase. + BindDN string `json:"bindDN"` + // BindPassword is an optional password to bind with during the search phase. + BindPassword StringSource `json:"bindPassword"` + + // Insecure, if true, indicates the connection should not use TLS. + // Cannot be set to true with a URL scheme of "ldaps://" + // If false, "ldaps://" URLs connect using TLS, and "ldap://" URLs are upgraded to a TLS connection using StartTLS as specified in https://tools.ietf.org/html/rfc2830 + Insecure bool `json:"insecure"` + // CA is the optional trusted certificate authority bundle to use when making requests to the server + // If empty, the default system roots are used + CA string `json:"ca"` + // Attributes maps LDAP attributes to identities + Attributes LDAPAttributeMapping `json:"attributes"` +} + +// LDAPAttributeMapping maps LDAP attributes to OpenShift identity fields +type LDAPAttributeMapping struct { + // ID is the list of attributes whose values should be used as the user ID. Required. + // LDAP standard identity attribute is "dn" + ID []string `json:"id"` + // PreferredUsername is the list of attributes whose values should be used as the preferred username. + // LDAP standard login attribute is "uid" + PreferredUsername []string `json:"preferredUsername"` + // Name is the list of attributes whose values should be used as the display name. Optional. + // If unspecified, no display name is set for the identity + // LDAP standard display name attribute is "cn" + Name []string `json:"name"` + // Email is the list of attributes whose values should be used as the email address. Optional. + // If unspecified, no email is set for the identity + Email []string `json:"email"` +} + +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// KeystonePasswordIdentityProvider provides identities for users authenticating using keystone password credentials +type KeystonePasswordIdentityProvider struct { + metav1.TypeMeta `json:",inline"` + // RemoteConnectionInfo contains information about how to connect to the keystone server + RemoteConnectionInfo `json:",inline"` + // DomainName is required for keystone v3 + DomainName string `json:"domainName"` + // UseKeystoneIdentity flag indicates that users should be authenticated by keystone ID, not by username + UseKeystoneIdentity bool `json:"useKeystoneIdentity"` +} + +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// RequestHeaderIdentityProvider provides identities for users authenticating using request header credentials +type RequestHeaderIdentityProvider struct { + metav1.TypeMeta `json:",inline"` + + // LoginURL is a URL to redirect unauthenticated /authorize requests to + // Unauthenticated requests from OAuth clients which expect interactive logins will be redirected here + // ${url} is replaced with the current URL, escaped to be safe in a query parameter + // https://www.example.com/sso-login?then=${url} + // ${query} is replaced with the current query string + // https://www.example.com/auth-proxy/oauth/authorize?${query} + LoginURL string `json:"loginURL"` + + // ChallengeURL is a URL to redirect unauthenticated /authorize requests to + // Unauthenticated requests from OAuth clients which expect WWW-Authenticate challenges will be redirected here + // ${url} is replaced with the current URL, escaped to be safe in a query parameter + //
https://www.example.com/sso-login?then=${url} + // ${query} is replaced with the current query string + // https://www.example.com/auth-proxy/oauth/authorize?${query} + // (a substitution sketch follows the GitHub provider type below) + ChallengeURL string `json:"challengeURL"` + + // ClientCA is a file with the trusted signer certs. If empty, no request verification is done, and any direct request to the OAuth server can impersonate any identity from this provider, merely by setting a request header. + ClientCA string `json:"clientCA"` + // ClientCommonNames is an optional list of common names to require a match against. If empty, any client certificate validated against the clientCA bundle is considered authoritative. + ClientCommonNames []string `json:"clientCommonNames"` + + // Headers is the set of headers to check for identity information + Headers []string `json:"headers"` + // PreferredUsernameHeaders is the set of headers to check for the preferred username + PreferredUsernameHeaders []string `json:"preferredUsernameHeaders"` + // NameHeaders is the set of headers to check for the display name + NameHeaders []string `json:"nameHeaders"` + // EmailHeaders is the set of headers to check for the email address + EmailHeaders []string `json:"emailHeaders"` +} + +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// GitHubIdentityProvider provides identities for users authenticating using GitHub credentials +type GitHubIdentityProvider struct { + metav1.TypeMeta `json:",inline"` + + // ClientID is the oauth client ID + ClientID string `json:"clientID"` + // ClientSecret is the oauth client secret + ClientSecret StringSource `json:"clientSecret"` + // Organizations optionally restricts which organizations are allowed to log in + Organizations []string `json:"organizations"` + // Teams optionally restricts which teams are allowed to log in. Format is <org>/<team>. + Teams []string `json:"teams"` + // Hostname is the optional domain (e.g. "mycompany.com") for use with a hosted instance of GitHub Enterprise. + // It must match the GitHub Enterprise settings value that is configured at /setup/settings#hostname. + Hostname string `json:"hostname"` + // CA is the optional trusted certificate authority bundle to use when making requests to the server. + // If empty, the default system roots are used. This can only be configured when hostname is set to a non-empty value. + CA string `json:"ca"` +}
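Returning to the ${url}/${query} substitution documented on RequestHeaderIdentityProvider above, a sketch of the described behavior; the expand helper is hypothetical, not the actual implementation:

package main

import (
	"fmt"
	"net/url"
	"strings"
)

// expand replaces ${url} with the current URL escaped for use in a query
// parameter, and ${query} with the raw query string, as documented.
func expand(template string, current *url.URL) string {
	s := strings.ReplaceAll(template, "${url}", url.QueryEscape(current.String()))
	return strings.ReplaceAll(s, "${query}", current.RawQuery)
}

func main() {
	// A hypothetical unauthenticated /authorize request.
	cur, err := url.Parse("https://master.example.com/oauth/authorize?client_id=web")
	if err != nil {
		panic(err)
	}
	fmt.Println(expand("https://www.example.com/sso-login?then=${url}", cur))
	fmt.Println(expand("https://www.example.com/auth-proxy/oauth/authorize?${query}", cur))
}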
+ +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// GitLabIdentityProvider provides identities for users authenticating using GitLab credentials +type GitLabIdentityProvider struct { + metav1.TypeMeta `json:",inline"` + + // CA is the optional trusted certificate authority bundle to use when making requests to the server + // If empty, the default system roots are used + CA string `json:"ca"` + // URL is the oauth server base URL + URL string `json:"url"` + // ClientID is the oauth client ID + ClientID string `json:"clientID"` + // ClientSecret is the oauth client secret + ClientSecret StringSource `json:"clientSecret"` + // Legacy determines if OAuth2 or OIDC should be used + // If true, OAuth2 is used + // If false, OIDC is used + // If nil and the URL's host is gitlab.com, OIDC is used + // Otherwise, OAuth2 is used + // In a future release, nil will default to using OIDC + // Eventually this flag will be removed and only OIDC will be used + Legacy *bool `json:"legacy,omitempty"` +} + +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// GoogleIdentityProvider provides identities for users authenticating using Google credentials +type GoogleIdentityProvider struct { + metav1.TypeMeta `json:",inline"` + + // ClientID is the oauth client ID + ClientID string `json:"clientID"` + // ClientSecret is the oauth client secret + ClientSecret StringSource `json:"clientSecret"` + + // HostedDomain is the optional Google App domain (e.g. "mycompany.com") to restrict logins to + HostedDomain string `json:"hostedDomain"` +} + +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// OpenIDIdentityProvider provides identities for users authenticating using OpenID credentials +type OpenIDIdentityProvider struct { + metav1.TypeMeta `json:",inline"` + + // CA is the optional trusted certificate authority bundle to use when making requests to the server + // If empty, the default system roots are used + CA string `json:"ca"` + + // ClientID is the oauth client ID + ClientID string `json:"clientID"` + // ClientSecret is the oauth client secret + ClientSecret StringSource `json:"clientSecret"` + + // ExtraScopes are any scopes to request in addition to the standard "openid" scope. + ExtraScopes []string `json:"extraScopes"` + + // ExtraAuthorizeParameters are any custom parameters to add to the authorize request. + ExtraAuthorizeParameters map[string]string `json:"extraAuthorizeParameters"` + + // URLs to use to authenticate + URLs OpenIDURLs `json:"urls"` + + // Claims mappings + Claims OpenIDClaims `json:"claims"` +} + +// OpenIDURLs are URLs to use when authenticating with an OpenID identity provider +type OpenIDURLs struct { + // Authorize is the oauth authorization URL + Authorize string `json:"authorize"` + // Token is the oauth token granting URL + Token string `json:"token"` + // UserInfo is the optional userinfo URL. + // If present, a granted access_token is used to request claims + // If empty, a granted id_token is parsed for claims + UserInfo string `json:"userInfo"` +} + +// OpenIDClaims contains a list of OpenID claims to use when authenticating with an OpenID identity provider +type OpenIDClaims struct { + // ID is the list of claims whose values should be used as the user ID. Required. + // OpenID standard identity claim is "sub" + ID []string `json:"id"` + // PreferredUsername is the list of claims whose values should be used as the preferred username.
+ // If unspecified, the preferred username is determined from the value of the id claim + PreferredUsername []string `json:"preferredUsername"` + // Name is the list of claims whose values should be used as the display name. Optional. + // If unspecified, no display name is set for the identity + Name []string `json:"name"` + // Email is the list of claims whose values should be used as the email address. Optional. + // If unspecified, no email is set for the identity + Email []string `json:"email"` +} + +// GrantConfig holds the necessary configuration options for grant handlers +type GrantConfig struct { + // Method determines the default strategy to use when an OAuth client requests a grant. + // This method will be used only if the specific OAuth client doesn't provide a strategy + // of their own. Valid grant handling methods are: + // - auto: always approves grant requests, useful for trusted clients + // - prompt: prompts the end user for approval of grant requests, useful for third-party clients + // - deny: always denies grant requests, useful for black-listed clients + Method GrantHandlerType `json:"method"` + + // ServiceAccountMethod is used for determining client authorization for service account OAuth clients. + // It must be either deny or prompt + ServiceAccountMethod GrantHandlerType `json:"serviceAccountMethod"` +} + +type GrantHandlerType string + +const ( + // GrantHandlerAuto auto-approves client authorization grant requests + GrantHandlerAuto GrantHandlerType = "auto" + // GrantHandlerPrompt prompts the user to approve new client authorization grant requests + GrantHandlerPrompt GrantHandlerType = "prompt" + // GrantHandlerDeny auto-denies client authorization grant requests + GrantHandlerDeny GrantHandlerType = "deny" +) + +// EtcdConfig holds the necessary configuration options for connecting with an etcd database +type EtcdConfig struct { + // ServingInfo describes how to start serving the etcd master + ServingInfo ServingInfo `json:"servingInfo"` + // Address is the advertised host:port for client connections to etcd + Address string `json:"address"` + // PeerServingInfo describes how to start serving the etcd peer + PeerServingInfo ServingInfo `json:"peerServingInfo"` + // PeerAddress is the advertised host:port for peer connections to etcd + PeerAddress string `json:"peerAddress"` + + // StorageDir is the path to the etcd storage directory + StorageDir string `json:"storageDirectory"` +} + +// KubernetesMasterConfig holds the necessary configuration options for the Kubernetes master +type KubernetesMasterConfig struct { + // APILevels is a list of API levels that should be enabled on startup, e.g. v1 + APILevels []string `json:"apiLevels"` + // DisabledAPIGroupVersions is a map of groups to the versions (or *) that should be disabled. + DisabledAPIGroupVersions map[string][]string `json:"disabledAPIGroupVersions"` + + // MasterIP is the public IP address of the Kubernetes master. If empty, the first result from net.InterfaceAddrs will be used. + MasterIP string `json:"masterIP"` + // MasterEndpointReconcileTTL sets the time to live in seconds of an endpoint record recorded by each master. The endpoints are checked + // at an interval that is 2/3 of this value and this value defaults to 15s if unset. In very large clusters, this value may be increased to + // reduce the possibility that the master endpoint record expires (due to other load on the etcd server) and causes masters to drop in and + // out of the kubernetes service record. It is not recommended to set this value below 15s. + MasterEndpointReconcileTTL int `json:"masterEndpointReconcileTTL"`
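To make the TTL arithmetic just described concrete, a small sketch; the reconcileInterval helper is hypothetical and simply restates the documented rule (re-check at 2/3 of the TTL, TTL defaulting to 15s when unset):

package main

import (
	"fmt"
	"time"
)

// reconcileInterval derives the endpoint re-check interval from the TTL.
func reconcileInterval(ttlSeconds int) time.Duration {
	if ttlSeconds == 0 {
		ttlSeconds = 15 // documented default
	}
	ttl := time.Duration(ttlSeconds) * time.Second
	return ttl * 2 / 3
}

func main() {
	fmt.Println(reconcileInterval(0))  // 10s: 2/3 of the 15s default
	fmt.Println(reconcileInterval(30)) // 20s
}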
+ // ServicesSubnet is the subnet to use for assigning service IPs + ServicesSubnet string `json:"servicesSubnet"` + // ServicesNodePortRange is the range to use for assigning service public ports on a host. + ServicesNodePortRange string `json:"servicesNodePortRange"` + + // SchedulerConfigFile points to a file that describes how to set up the scheduler. If empty, you get the default scheduling rules. + SchedulerConfigFile string `json:"schedulerConfigFile"` + + // PodEvictionTimeout controls the grace period for deleting pods on failed nodes. + // It takes a valid time duration string. If empty, you get the default pod eviction timeout. + PodEvictionTimeout string `json:"podEvictionTimeout"` + // ProxyClientInfo specifies the client cert/key to use when proxying to pods + ProxyClientInfo CertInfo `json:"proxyClientInfo"` + + // APIServerArguments are key value pairs that will be passed directly to the Kube apiserver that match the apiserver's + // command line arguments. These are not migrated, but if you reference a value that does not exist the server will not + // start. These values may override other settings in KubernetesMasterConfig which may cause invalid configurations. + APIServerArguments ExtendedArguments `json:"apiServerArguments"` + // ControllerArguments are key value pairs that will be passed directly to the Kube controller manager that match the + // controller manager's command line arguments. These are not migrated, but if you reference a value that does not exist + // the server will not start. These values may override other settings in KubernetesMasterConfig which may cause invalid + // configurations. + ControllerArguments ExtendedArguments `json:"controllerArguments"` + // SchedulerArguments are key value pairs that will be passed directly to the Kube scheduler that match the scheduler's + // command line arguments. These are not migrated, but if you reference a value that does not exist the server will not + // start. These values may override other settings in KubernetesMasterConfig which may cause invalid configurations. + SchedulerArguments ExtendedArguments `json:"schedulerArguments"` +} + +// CertInfo relates a certificate with a private key +type CertInfo struct { + // CertFile is a file containing a PEM-encoded certificate + CertFile string `json:"certFile"` + // KeyFile is a file containing a PEM-encoded private key for the certificate specified by CertFile + KeyFile string `json:"keyFile"` +} + +// PodManifestConfig holds the necessary configuration options for using pod manifests +type PodManifestConfig struct { + // Path specifies the path for the pod manifest file or directory + // If it is a directory, it is expected to contain one or more manifest files + // This is used by the Kubelet to create pods on the node + Path string `json:"path"` + // FileCheckIntervalSeconds is the interval in seconds for checking the manifest file(s) for new data + // The interval needs to be a positive value + FileCheckIntervalSeconds int64 `json:"fileCheckIntervalSeconds"` +}
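The StringSource type defined next accepts two JSON shapes: a bare string, or the object form with value/env/file/keyFile. The sketch below uses a simplified stand-in type to illustrate that dual decoding; it is not the actual implementation, whose marshaling logic may differ in detail:

package main

import (
	"encoding/json"
	"fmt"
)

// source is a simplified stand-in for StringSource's spec fields.
type source struct {
	Value string `json:"value"`
	Env   string `json:"env"`
	File  string `json:"file"`
}

// UnmarshalJSON accepts either a bare JSON string or the object form.
func (s *source) UnmarshalJSON(b []byte) error {
	var plain string
	if err := json.Unmarshal(b, &plain); err == nil {
		s.Value = plain
		return nil
	}
	type raw source // alias without methods, to avoid recursing here
	return json.Unmarshal(b, (*raw)(s))
}

func main() {
	var a, b source
	if err := json.Unmarshal([]byte(`"secret"`), &a); err != nil {
		panic(err)
	}
	if err := json.Unmarshal([]byte(`{"env":"CLIENT_SECRET"}`), &b); err != nil {
		panic(err)
	}
	fmt.Printf("%+v\n%+v\n", a, b)
}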
+ +// StringSource allows specifying a string inline, or externally via env var or file. +// When it contains only a string value, it marshals to a simple JSON string. +type StringSource struct { + // StringSourceSpec specifies the string value, or external location + StringSourceSpec `json:",inline"` +} + +// StringSourceSpec specifies a string value, or external location +type StringSourceSpec struct { + // Value specifies the cleartext value, or an encrypted value if keyFile is specified. + Value string `json:"value"` + + // Env specifies an envvar containing the cleartext value, or an encrypted value if the keyFile is specified. + Env string `json:"env"` + + // File references a file containing the cleartext value, or an encrypted value if a keyFile is specified. + File string `json:"file"` + + // KeyFile references a file containing the key to use to decrypt the value. + KeyFile string `json:"keyFile"` +} + +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// LDAPSyncConfig holds the necessary configuration options to define an LDAP group sync +type LDAPSyncConfig struct { + metav1.TypeMeta `json:",inline"` + // URL is the scheme, host and port of the LDAP server to connect to: + // scheme://host:port + URL string `json:"url"` + // BindDN is an optional DN to bind to the LDAP server with + BindDN string `json:"bindDN"` + // BindPassword is an optional password to bind with during the search phase. + BindPassword StringSource `json:"bindPassword"` + + // Insecure, if true, indicates the connection should not use TLS. + // Cannot be set to true with a URL scheme of "ldaps://" + // If false, "ldaps://" URLs connect using TLS, and "ldap://" URLs are upgraded to a TLS connection using StartTLS as specified in https://tools.ietf.org/html/rfc2830 + Insecure bool `json:"insecure"` + // CA is the optional trusted certificate authority bundle to use when making requests to the server + // If empty, the default system roots are used + CA string `json:"ca"` + + // LDAPGroupUIDToOpenShiftGroupNameMapping is an optional direct mapping of LDAP group UIDs to + // OpenShift Group names + LDAPGroupUIDToOpenShiftGroupNameMapping map[string]string `json:"groupUIDNameMapping"` + + // RFC2307Config holds the configuration for extracting data from an LDAP server set up in a fashion + // similar to RFC2307: first-class group and user entries, with group membership determined by a + // multi-valued attribute on the group entry listing its members + RFC2307Config *RFC2307Config `json:"rfc2307,omitempty"` + + // ActiveDirectoryConfig holds the configuration for extracting data from an LDAP server set up in a + // fashion similar to that used in Active Directory: first-class user entries, with group membership + // determined by a multi-valued attribute on members listing groups they are a member of + ActiveDirectoryConfig *ActiveDirectoryConfig `json:"activeDirectory,omitempty"` + + // AugmentedActiveDirectoryConfig holds the configuration for extracting data from an LDAP server + // set up in a fashion similar to that used in Active Directory as described above, with one addition: + // first-class group entries exist and are used to hold metadata but not group membership + AugmentedActiveDirectoryConfig *AugmentedActiveDirectoryConfig `json:"augmentedActiveDirectory,omitempty"` +} + +// RFC2307Config holds the necessary configuration options to define how an LDAP group sync interacts with an LDAP +// server using the RFC2307 schema +type RFC2307Config struct { + // AllGroupsQuery holds the template for an LDAP query that returns group entries.
+ AllGroupsQuery LDAPQuery `json:"groupsQuery"` + + // GroupUIDAttribute defines which attribute on an LDAP group entry will be interpreted as its unique identifier. + // (ldapGroupUID) + GroupUIDAttribute string `json:"groupUIDAttribute"` + + // GroupNameAttributes defines which attributes on an LDAP group entry will be interpreted as its name to use for + // an OpenShift group + GroupNameAttributes []string `json:"groupNameAttributes"` + + // GroupMembershipAttributes defines which attributes on an LDAP group entry will be interpreted as its members. + // The values contained in those attributes must be queryable by your UserUIDAttribute + GroupMembershipAttributes []string `json:"groupMembershipAttributes"` + + // AllUsersQuery holds the template for an LDAP query that returns user entries. + AllUsersQuery LDAPQuery `json:"usersQuery"` + + // UserUIDAttribute defines which attribute on an LDAP user entry will be interpreted as its unique identifier. + // It must correspond to values that will be found from the GroupMembershipAttributes + UserUIDAttribute string `json:"userUIDAttribute"` + + // UserNameAttributes defines which attributes on an LDAP user entry will be used, in order, as its OpenShift user name. + // The first attribute with a non-empty value is used. This should match your PreferredUsername setting for your LDAPPasswordIdentityProvider + UserNameAttributes []string `json:"userNameAttributes"` + + // TolerateMemberNotFoundErrors determines the behavior of the LDAP sync job when missing user entries are + // encountered. If 'true', an LDAP query for users that doesn't find any will be tolerated and only + // an error will be logged. If 'false', the LDAP sync job will fail if a query for users doesn't find + // any. The default value is 'false'. Misconfigured LDAP sync jobs with this flag set to 'true' can cause + // group membership to be removed, so it is recommended to use this flag with caution. + TolerateMemberNotFoundErrors bool `json:"tolerateMemberNotFoundErrors"` + + // TolerateMemberOutOfScopeErrors determines the behavior of the LDAP sync job when out-of-scope user entries + // are encountered. If 'true', an LDAP query for a user that falls outside of the base DN given for the all + // user query will be tolerated and only an error will be logged. If 'false', the LDAP sync job will fail + // if a user query would search outside of the base DN specified by the all user query. Misconfigured LDAP + // sync jobs with this flag set to 'true' can result in groups missing users, so it is recommended to use + // this flag with caution. + TolerateMemberOutOfScopeErrors bool `json:"tolerateMemberOutOfScopeErrors"` +} + +// ActiveDirectoryConfig holds the necessary configuration options to define how an LDAP group sync interacts with an LDAP +// server using the Active Directory schema +type ActiveDirectoryConfig struct { + // AllUsersQuery holds the template for an LDAP query that returns user entries. + AllUsersQuery LDAPQuery `json:"usersQuery"` + + // UserNameAttributes defines which attributes on an LDAP user entry will be interpreted as its OpenShift user name.
+ UserNameAttributes []string `json:"userNameAttributes"` + + // GroupMembershipAttributes defines which attributes on an LDAP user entry will be interpreted + // as the groups it is a member of + GroupMembershipAttributes []string `json:"groupMembershipAttributes"` +} + +// AugmentedActiveDirectoryConfig holds the necessary configuration options to define how an LDAP group sync interacts with an LDAP +// server using the augmented Active Directory schema +type AugmentedActiveDirectoryConfig struct { + // AllUsersQuery holds the template for an LDAP query that returns user entries. + AllUsersQuery LDAPQuery `json:"usersQuery"` + + // UserNameAttributes defines which attributes on an LDAP user entry will be interpreted as its OpenShift user name. + UserNameAttributes []string `json:"userNameAttributes"` + + // GroupMembershipAttributes defines which attributes on an LDAP user entry will be interpreted + // as the groups it is a member of + GroupMembershipAttributes []string `json:"groupMembershipAttributes"` + + // AllGroupsQuery holds the template for an LDAP query that returns group entries. + AllGroupsQuery LDAPQuery `json:"groupsQuery"` + + // GroupUIDAttribute defines which attribute on an LDAP group entry will be interpreted as its unique identifier. + // (ldapGroupUID) + GroupUIDAttribute string `json:"groupUIDAttribute"` + + // GroupNameAttributes defines which attributes on an LDAP group entry will be interpreted as its name to use for + // an OpenShift group + GroupNameAttributes []string `json:"groupNameAttributes"` +} + +// LDAPQuery holds the options necessary to build an LDAP query +type LDAPQuery struct { + // The DN of the branch of the directory where all searches should start from + BaseDN string `json:"baseDN"` + + // The (optional) scope of the search. Can be: + // base: only the base object, + // one: all objects on the base level, + // sub: the entire subtree + // Defaults to the entire subtree if not set + Scope string `json:"scope"` + + // The (optional) behavior of the search with regard to aliases. Can be: + // never: never dereference aliases, + // search: only dereference in searching, + // base: only dereference in finding the base object, + // always: always dereference + // Defaults to always dereferencing if not set + DerefAliases string `json:"derefAliases"` + + // TimeLimit holds the limit of time in seconds that any request to the server can remain outstanding + // before the wait for a response is given up. If this is 0, no client-side limit is imposed + TimeLimit int `json:"timeout"` + + // Filter is a valid LDAP search filter that retrieves all relevant entries from the LDAP server with the base DN + Filter string `json:"filter"` + + // PageSize is the maximum preferred page size, measured in LDAP entries. A page size of 0 means no paging will be done. + // (a query-construction sketch follows the next type) + PageSize int `json:"pageSize"` +} + +// AdmissionPluginConfig holds the necessary configuration options for admission plugins +type AdmissionPluginConfig struct { + // Location is the path to a configuration file that contains the plugin's + // configuration + Location string `json:"location"` + + // Configuration is an embedded configuration object to be used as the plugin's + // configuration. If present, it will be used instead of the path to the configuration file. + Configuration runtime.RawExtension `json:"configuration"` +}
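As one illustration of how LDAPQuery's fields map onto an actual search, the sketch below builds a request with the third-party github.com/go-ldap/ldap/v3 client; the values and the choice of library are assumptions for the example, not something this package prescribes:

package main

import (
	"fmt"

	"github.com/go-ldap/ldap/v3"
)

// scopeFromString maps the documented string values onto the integer
// constants used by the go-ldap client library.
func scopeFromString(s string) int {
	switch s {
	case "base":
		return ldap.ScopeBaseObject
	case "one":
		return ldap.ScopeSingleLevel
	default: // "sub" or unset: the entire subtree
		return ldap.ScopeWholeSubtree
	}
}

func main() {
	// Hypothetical LDAPQuery values; Dial/Search are omitted here.
	req := ldap.NewSearchRequest(
		"ou=groups,dc=example,dc=com", // BaseDN
		scopeFromString("sub"),
		ldap.DerefAlways, // "always" is the documented default
		0,                // SizeLimit: 0 imposes no client-side limit
		30,               // TimeLimit, in seconds
		false,            // TypesOnly
		"(objectClass=groupOfNames)", // Filter
		[]string{"dn", "cn", "member"},
		nil,
	)
	fmt.Printf("%+v\n", req)
}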
+ +// AdmissionConfig holds the necessary configuration options for admission +type AdmissionConfig struct { + // PluginConfig allows specifying a configuration file per admission control plugin + PluginConfig map[string]*AdmissionPluginConfig `json:"pluginConfig"` + + // PluginOrderOverride is a list of admission control plugin names that will be installed + // on the master. Order is significant. If empty, a default list of plugins is used. + PluginOrderOverride []string `json:"pluginOrderOverride,omitempty"` +} + +// ControllerConfig holds configuration values for controllers +type ControllerConfig struct { + // Controllers is a list of controllers to enable. '*' enables all on-by-default controllers, 'foo' enables the controller + // named 'foo', '-foo' disables the controller named 'foo'. + // Defaults to "*". + // (a resolution sketch follows the types below) + Controllers []string `json:"controllers"` + // Election defines the configuration for electing a controller instance to make changes to + // the cluster. If unspecified, the ControllerTTL value is checked to determine whether the + // legacy direct etcd election code will be used. + Election *ControllerElectionConfig `json:"election"` + // ServiceServingCert holds configuration for service serving cert signer which creates cert/key pairs for + // pods fulfilling a service to serve with. + ServiceServingCert ServiceServingCert `json:"serviceServingCert"` +} + +// ControllerElectionConfig contains configuration values for deciding how a controller +// will be elected to act as leader. +type ControllerElectionConfig struct { + // LockName is the resource name used to act as the lock for determining which controller + // instance should lead. + LockName string `json:"lockName"` + // LockNamespace is the resource namespace used to act as the lock for determining which + // controller instance should lead. It defaults to "kube-system" + LockNamespace string `json:"lockNamespace"` + // LockResource is the group and resource name to use to coordinate for the controller lock. + // If unset, defaults to "configmaps". + LockResource GroupResource `json:"lockResource"` +} + +// GroupResource points to a resource by its name and API group. +type GroupResource struct { + // Group is the name of an API group + Group string `json:"group"` + // Resource is the name of a resource. + Resource string `json:"resource"` +} + +// ServiceServingCert holds configuration for service serving cert signer which creates cert/key pairs for +// pods fulfilling a service to serve with. +type ServiceServingCert struct { + // Signer holds the signing information used to automatically sign serving certificates. + // If this value is nil, then certs are not signed automatically. + Signer *CertInfo `json:"signer"` +} + +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// DefaultAdmissionConfig can be used to enable or disable various admission plugins. +// When this type is present as the `configuration` object under `pluginConfig` and *if* the admission plugin supports it, +// this will cause an "off by default" admission plugin to be enabled +type DefaultAdmissionConfig struct { + metav1.TypeMeta `json:",inline"` + + // Disable turns off an admission plugin that is enabled by default. + Disable bool `json:"disable"` +}
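A sketch of resolving a Controllers list like ["*", "-foo"]; the controllerEnabled helper is hypothetical and simply mirrors the '*'/'foo'/'-foo' semantics documented on ControllerConfig above:

package main

import "fmt"

// controllerEnabled reports whether a named controller should run:
// 'name' enables it, '-name' disables it, and '*' turns on everything
// that is on by default.
func controllerEnabled(controllers []string, name string, onByDefault bool) bool {
	star := false
	for _, c := range controllers {
		if c == name {
			return true
		}
		if c == "-"+name {
			return false
		}
		if c == "*" {
			star = true
		}
	}
	return star && onByDefault
}

func main() {
	cfg := []string{"*", "-serviceaccount"}
	fmt.Println(controllerEnabled(cfg, "deployments", true))    // true via '*'
	fmt.Println(controllerEnabled(cfg, "serviceaccount", true)) // false: disabled
}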
+ +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// BuildDefaultsConfig controls the default information for Builds +type BuildDefaultsConfig struct { + metav1.TypeMeta `json:",inline"` + + // gitHTTPProxy is the location of the HTTPProxy for Git source + GitHTTPProxy string `json:"gitHTTPProxy,omitempty"` + + // gitHTTPSProxy is the location of the HTTPSProxy for Git source + GitHTTPSProxy string `json:"gitHTTPSProxy,omitempty"` + + // gitNoProxy is the list of domains for which the proxy should not be used + GitNoProxy string `json:"gitNoProxy,omitempty"` + + // env is a set of default environment variables that will be applied to the + // build if the specified variables do not exist on the build + Env []corev1.EnvVar `json:"env,omitempty"` + + // sourceStrategyDefaults are default values that apply to builds using the + // source strategy. + SourceStrategyDefaults *SourceStrategyDefaultsConfig `json:"sourceStrategyDefaults,omitempty"` + + // imageLabels is a list of labels that are applied to the resulting image. + // Users can override a default label by providing a label with the same name in their + // Build/BuildConfig. + ImageLabels []buildv1.ImageLabel `json:"imageLabels,omitempty"` + + // nodeSelector is a selector which must be true for the build pod to fit on a node + NodeSelector map[string]string `json:"nodeSelector,omitempty"` + + // annotations are annotations that will be added to the build pod + Annotations map[string]string `json:"annotations,omitempty"` + + // resources defines resource requirements to execute the build. + Resources corev1.ResourceRequirements `json:"resources,omitempty"` +} + +// SourceStrategyDefaultsConfig contains values that apply to builds using the +// source strategy. +type SourceStrategyDefaultsConfig struct { + + // incremental indicates if s2i build strategies should perform an incremental + // build or not + Incremental *bool `json:"incremental,omitempty"` +} + +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// BuildOverridesConfig controls override settings for builds +type BuildOverridesConfig struct { + metav1.TypeMeta `json:",inline"` + + // forcePull indicates whether the build strategy should always be set to ForcePull=true + ForcePull bool `json:"forcePull"` + + // imageLabels is a list of labels that are applied to the resulting image. + // If the user provides a label in their Build/BuildConfig with the same name as one in this + // list, the user's label will be overwritten. + ImageLabels []buildv1.ImageLabel `json:"imageLabels,omitempty"` + + // nodeSelector is a selector which must be true for the build pod to fit on a node + NodeSelector map[string]string `json:"nodeSelector,omitempty"` + + // annotations are annotations that will be added to the build pod + Annotations map[string]string `json:"annotations,omitempty"` + + // tolerations is a list of Tolerations that will override any existing + // tolerations set on a build pod. + Tolerations []corev1.Toleration `json:"tolerations,omitempty"` +} diff --git a/vendor/github.com/openshift/api/legacyconfig/v1/zz_generated.deepcopy.go b/vendor/github.com/openshift/api/legacyconfig/v1/zz_generated.deepcopy.go new file mode 100644 index 0000000000000000000000000000000000000000..b65c4beb7fe78df282f2d15532718f836c9aa7cc --- /dev/null +++ b/vendor/github.com/openshift/api/legacyconfig/v1/zz_generated.deepcopy.go @@ -0,0 +1,2142 @@ +// +build !ignore_autogenerated + +// Code generated by deepcopy-gen. DO NOT EDIT.
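The rest of this generated file is mechanical: each type above gets DeepCopyInto/DeepCopy helpers, plus DeepCopyObject where the type satisfies runtime.Object. A minimal usage sketch, assuming the vendored import path resolves in your module:

package main

import (
	"fmt"

	legacyconfigv1 "github.com/openshift/api/legacyconfig/v1"
)

func main() {
	orig := &legacyconfigv1.LDAPAttributeMapping{ID: []string{"dn"}}

	// DeepCopy returns a new object whose slices and maps do not alias
	// the originals, so mutating the copy leaves orig untouched.
	cp := orig.DeepCopy()
	cp.ID[0] = "uid"

	fmt.Println(orig.ID[0]) // still "dn"
}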
+ +package v1 + +import ( + buildv1 "github.com/openshift/api/build/v1" + corev1 "k8s.io/api/core/v1" + runtime "k8s.io/apimachinery/pkg/runtime" +) + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ActiveDirectoryConfig) DeepCopyInto(out *ActiveDirectoryConfig) { + *out = *in + out.AllUsersQuery = in.AllUsersQuery + if in.UserNameAttributes != nil { + in, out := &in.UserNameAttributes, &out.UserNameAttributes + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.GroupMembershipAttributes != nil { + in, out := &in.GroupMembershipAttributes, &out.GroupMembershipAttributes + *out = make([]string, len(*in)) + copy(*out, *in) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ActiveDirectoryConfig. +func (in *ActiveDirectoryConfig) DeepCopy() *ActiveDirectoryConfig { + if in == nil { + return nil + } + out := new(ActiveDirectoryConfig) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *AdmissionConfig) DeepCopyInto(out *AdmissionConfig) { + *out = *in + if in.PluginConfig != nil { + in, out := &in.PluginConfig, &out.PluginConfig + *out = make(map[string]*AdmissionPluginConfig, len(*in)) + for key, val := range *in { + var outVal *AdmissionPluginConfig + if val == nil { + (*out)[key] = nil + } else { + in, out := &val, &outVal + *out = new(AdmissionPluginConfig) + (*in).DeepCopyInto(*out) + } + (*out)[key] = outVal + } + } + if in.PluginOrderOverride != nil { + in, out := &in.PluginOrderOverride, &out.PluginOrderOverride + *out = make([]string, len(*in)) + copy(*out, *in) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AdmissionConfig. +func (in *AdmissionConfig) DeepCopy() *AdmissionConfig { + if in == nil { + return nil + } + out := new(AdmissionConfig) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *AdmissionPluginConfig) DeepCopyInto(out *AdmissionPluginConfig) { + *out = *in + in.Configuration.DeepCopyInto(&out.Configuration) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AdmissionPluginConfig. +func (in *AdmissionPluginConfig) DeepCopy() *AdmissionPluginConfig { + if in == nil { + return nil + } + out := new(AdmissionPluginConfig) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *AggregatorConfig) DeepCopyInto(out *AggregatorConfig) { + *out = *in + out.ProxyClientInfo = in.ProxyClientInfo + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AggregatorConfig. +func (in *AggregatorConfig) DeepCopy() *AggregatorConfig { + if in == nil { + return nil + } + out := new(AggregatorConfig) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *AllowAllPasswordIdentityProvider) DeepCopyInto(out *AllowAllPasswordIdentityProvider) { + *out = *in + out.TypeMeta = in.TypeMeta + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AllowAllPasswordIdentityProvider.
+func (in *AllowAllPasswordIdentityProvider) DeepCopy() *AllowAllPasswordIdentityProvider { + if in == nil { + return nil + } + out := new(AllowAllPasswordIdentityProvider) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *AllowAllPasswordIdentityProvider) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in AllowedRegistries) DeepCopyInto(out *AllowedRegistries) { + { + in := &in + *out = make(AllowedRegistries, len(*in)) + copy(*out, *in) + return + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AllowedRegistries. +func (in AllowedRegistries) DeepCopy() AllowedRegistries { + if in == nil { + return nil + } + out := new(AllowedRegistries) + in.DeepCopyInto(out) + return *out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *AuditConfig) DeepCopyInto(out *AuditConfig) { + *out = *in + in.PolicyConfiguration.DeepCopyInto(&out.PolicyConfiguration) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AuditConfig. +func (in *AuditConfig) DeepCopy() *AuditConfig { + if in == nil { + return nil + } + out := new(AuditConfig) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *AugmentedActiveDirectoryConfig) DeepCopyInto(out *AugmentedActiveDirectoryConfig) { + *out = *in + out.AllUsersQuery = in.AllUsersQuery + if in.UserNameAttributes != nil { + in, out := &in.UserNameAttributes, &out.UserNameAttributes + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.GroupMembershipAttributes != nil { + in, out := &in.GroupMembershipAttributes, &out.GroupMembershipAttributes + *out = make([]string, len(*in)) + copy(*out, *in) + } + out.AllGroupsQuery = in.AllGroupsQuery + if in.GroupNameAttributes != nil { + in, out := &in.GroupNameAttributes, &out.GroupNameAttributes + *out = make([]string, len(*in)) + copy(*out, *in) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AugmentedActiveDirectoryConfig. +func (in *AugmentedActiveDirectoryConfig) DeepCopy() *AugmentedActiveDirectoryConfig { + if in == nil { + return nil + } + out := new(AugmentedActiveDirectoryConfig) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *BasicAuthPasswordIdentityProvider) DeepCopyInto(out *BasicAuthPasswordIdentityProvider) { + *out = *in + out.TypeMeta = in.TypeMeta + out.RemoteConnectionInfo = in.RemoteConnectionInfo + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BasicAuthPasswordIdentityProvider. +func (in *BasicAuthPasswordIdentityProvider) DeepCopy() *BasicAuthPasswordIdentityProvider { + if in == nil { + return nil + } + out := new(BasicAuthPasswordIdentityProvider) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. 
+func (in *BasicAuthPasswordIdentityProvider) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *BuildDefaultsConfig) DeepCopyInto(out *BuildDefaultsConfig) { + *out = *in + out.TypeMeta = in.TypeMeta + if in.Env != nil { + in, out := &in.Env, &out.Env + *out = make([]corev1.EnvVar, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.SourceStrategyDefaults != nil { + in, out := &in.SourceStrategyDefaults, &out.SourceStrategyDefaults + *out = new(SourceStrategyDefaultsConfig) + (*in).DeepCopyInto(*out) + } + if in.ImageLabels != nil { + in, out := &in.ImageLabels, &out.ImageLabels + *out = make([]buildv1.ImageLabel, len(*in)) + copy(*out, *in) + } + if in.NodeSelector != nil { + in, out := &in.NodeSelector, &out.NodeSelector + *out = make(map[string]string, len(*in)) + for key, val := range *in { + (*out)[key] = val + } + } + if in.Annotations != nil { + in, out := &in.Annotations, &out.Annotations + *out = make(map[string]string, len(*in)) + for key, val := range *in { + (*out)[key] = val + } + } + in.Resources.DeepCopyInto(&out.Resources) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BuildDefaultsConfig. +func (in *BuildDefaultsConfig) DeepCopy() *BuildDefaultsConfig { + if in == nil { + return nil + } + out := new(BuildDefaultsConfig) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *BuildDefaultsConfig) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *BuildOverridesConfig) DeepCopyInto(out *BuildOverridesConfig) { + *out = *in + out.TypeMeta = in.TypeMeta + if in.ImageLabels != nil { + in, out := &in.ImageLabels, &out.ImageLabels + *out = make([]buildv1.ImageLabel, len(*in)) + copy(*out, *in) + } + if in.NodeSelector != nil { + in, out := &in.NodeSelector, &out.NodeSelector + *out = make(map[string]string, len(*in)) + for key, val := range *in { + (*out)[key] = val + } + } + if in.Annotations != nil { + in, out := &in.Annotations, &out.Annotations + *out = make(map[string]string, len(*in)) + for key, val := range *in { + (*out)[key] = val + } + } + if in.Tolerations != nil { + in, out := &in.Tolerations, &out.Tolerations + *out = make([]corev1.Toleration, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BuildOverridesConfig. +func (in *BuildOverridesConfig) DeepCopy() *BuildOverridesConfig { + if in == nil { + return nil + } + out := new(BuildOverridesConfig) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *BuildOverridesConfig) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *CertInfo) DeepCopyInto(out *CertInfo) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CertInfo. +func (in *CertInfo) DeepCopy() *CertInfo { + if in == nil { + return nil + } + out := new(CertInfo) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ClientConnectionOverrides) DeepCopyInto(out *ClientConnectionOverrides) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClientConnectionOverrides. +func (in *ClientConnectionOverrides) DeepCopy() *ClientConnectionOverrides { + if in == nil { + return nil + } + out := new(ClientConnectionOverrides) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ClusterNetworkEntry) DeepCopyInto(out *ClusterNetworkEntry) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClusterNetworkEntry. +func (in *ClusterNetworkEntry) DeepCopy() *ClusterNetworkEntry { + if in == nil { + return nil + } + out := new(ClusterNetworkEntry) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ControllerConfig) DeepCopyInto(out *ControllerConfig) { + *out = *in + if in.Controllers != nil { + in, out := &in.Controllers, &out.Controllers + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.Election != nil { + in, out := &in.Election, &out.Election + *out = new(ControllerElectionConfig) + **out = **in + } + in.ServiceServingCert.DeepCopyInto(&out.ServiceServingCert) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ControllerConfig. +func (in *ControllerConfig) DeepCopy() *ControllerConfig { + if in == nil { + return nil + } + out := new(ControllerConfig) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ControllerElectionConfig) DeepCopyInto(out *ControllerElectionConfig) { + *out = *in + out.LockResource = in.LockResource + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ControllerElectionConfig. +func (in *ControllerElectionConfig) DeepCopy() *ControllerElectionConfig { + if in == nil { + return nil + } + out := new(ControllerElectionConfig) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DNSConfig) DeepCopyInto(out *DNSConfig) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DNSConfig. +func (in *DNSConfig) DeepCopy() *DNSConfig { + if in == nil { + return nil + } + out := new(DNSConfig) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DefaultAdmissionConfig) DeepCopyInto(out *DefaultAdmissionConfig) { + *out = *in + out.TypeMeta = in.TypeMeta + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DefaultAdmissionConfig. 
+func (in *DefaultAdmissionConfig) DeepCopy() *DefaultAdmissionConfig { + if in == nil { + return nil + } + out := new(DefaultAdmissionConfig) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *DefaultAdmissionConfig) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DenyAllPasswordIdentityProvider) DeepCopyInto(out *DenyAllPasswordIdentityProvider) { + *out = *in + out.TypeMeta = in.TypeMeta + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DenyAllPasswordIdentityProvider. +func (in *DenyAllPasswordIdentityProvider) DeepCopy() *DenyAllPasswordIdentityProvider { + if in == nil { + return nil + } + out := new(DenyAllPasswordIdentityProvider) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *DenyAllPasswordIdentityProvider) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DockerConfig) DeepCopyInto(out *DockerConfig) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DockerConfig. +func (in *DockerConfig) DeepCopy() *DockerConfig { + if in == nil { + return nil + } + out := new(DockerConfig) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *EtcdConfig) DeepCopyInto(out *EtcdConfig) { + *out = *in + in.ServingInfo.DeepCopyInto(&out.ServingInfo) + in.PeerServingInfo.DeepCopyInto(&out.PeerServingInfo) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new EtcdConfig. +func (in *EtcdConfig) DeepCopy() *EtcdConfig { + if in == nil { + return nil + } + out := new(EtcdConfig) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *EtcdConnectionInfo) DeepCopyInto(out *EtcdConnectionInfo) { + *out = *in + if in.URLs != nil { + in, out := &in.URLs, &out.URLs + *out = make([]string, len(*in)) + copy(*out, *in) + } + out.CertInfo = in.CertInfo + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new EtcdConnectionInfo. +func (in *EtcdConnectionInfo) DeepCopy() *EtcdConnectionInfo { + if in == nil { + return nil + } + out := new(EtcdConnectionInfo) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *EtcdStorageConfig) DeepCopyInto(out *EtcdStorageConfig) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new EtcdStorageConfig. +func (in *EtcdStorageConfig) DeepCopy() *EtcdStorageConfig { + if in == nil { + return nil + } + out := new(EtcdStorageConfig) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in ExtendedArguments) DeepCopyInto(out *ExtendedArguments) { + { + in := &in + *out = make(ExtendedArguments, len(*in)) + for key, val := range *in { + var outVal []string + if val == nil { + (*out)[key] = nil + } else { + in, out := &val, &outVal + *out = make([]string, len(*in)) + copy(*out, *in) + } + (*out)[key] = outVal + } + return + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ExtendedArguments. +func (in ExtendedArguments) DeepCopy() ExtendedArguments { + if in == nil { + return nil + } + out := new(ExtendedArguments) + in.DeepCopyInto(out) + return *out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in FeatureList) DeepCopyInto(out *FeatureList) { + { + in := &in + *out = make(FeatureList, len(*in)) + copy(*out, *in) + return + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FeatureList. +func (in FeatureList) DeepCopy() FeatureList { + if in == nil { + return nil + } + out := new(FeatureList) + in.DeepCopyInto(out) + return *out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *GitHubIdentityProvider) DeepCopyInto(out *GitHubIdentityProvider) { + *out = *in + out.TypeMeta = in.TypeMeta + out.ClientSecret = in.ClientSecret + if in.Organizations != nil { + in, out := &in.Organizations, &out.Organizations + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.Teams != nil { + in, out := &in.Teams, &out.Teams + *out = make([]string, len(*in)) + copy(*out, *in) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new GitHubIdentityProvider. +func (in *GitHubIdentityProvider) DeepCopy() *GitHubIdentityProvider { + if in == nil { + return nil + } + out := new(GitHubIdentityProvider) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *GitHubIdentityProvider) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *GitLabIdentityProvider) DeepCopyInto(out *GitLabIdentityProvider) { + *out = *in + out.TypeMeta = in.TypeMeta + out.ClientSecret = in.ClientSecret + if in.Legacy != nil { + in, out := &in.Legacy, &out.Legacy + *out = new(bool) + **out = **in + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new GitLabIdentityProvider. +func (in *GitLabIdentityProvider) DeepCopy() *GitLabIdentityProvider { + if in == nil { + return nil + } + out := new(GitLabIdentityProvider) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *GitLabIdentityProvider) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *GoogleIdentityProvider) DeepCopyInto(out *GoogleIdentityProvider) { + *out = *in + out.TypeMeta = in.TypeMeta + out.ClientSecret = in.ClientSecret + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new GoogleIdentityProvider. +func (in *GoogleIdentityProvider) DeepCopy() *GoogleIdentityProvider { + if in == nil { + return nil + } + out := new(GoogleIdentityProvider) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *GoogleIdentityProvider) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *GrantConfig) DeepCopyInto(out *GrantConfig) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new GrantConfig. +func (in *GrantConfig) DeepCopy() *GrantConfig { + if in == nil { + return nil + } + out := new(GrantConfig) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *GroupResource) DeepCopyInto(out *GroupResource) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new GroupResource. +func (in *GroupResource) DeepCopy() *GroupResource { + if in == nil { + return nil + } + out := new(GroupResource) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *HTPasswdPasswordIdentityProvider) DeepCopyInto(out *HTPasswdPasswordIdentityProvider) { + *out = *in + out.TypeMeta = in.TypeMeta + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HTPasswdPasswordIdentityProvider. +func (in *HTPasswdPasswordIdentityProvider) DeepCopy() *HTPasswdPasswordIdentityProvider { + if in == nil { + return nil + } + out := new(HTPasswdPasswordIdentityProvider) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *HTPasswdPasswordIdentityProvider) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *HTTPServingInfo) DeepCopyInto(out *HTTPServingInfo) { + *out = *in + in.ServingInfo.DeepCopyInto(&out.ServingInfo) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HTTPServingInfo. +func (in *HTTPServingInfo) DeepCopy() *HTTPServingInfo { + if in == nil { + return nil + } + out := new(HTTPServingInfo) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *IdentityProvider) DeepCopyInto(out *IdentityProvider) { + *out = *in + in.Provider.DeepCopyInto(&out.Provider) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IdentityProvider. 
+func (in *IdentityProvider) DeepCopy() *IdentityProvider { + if in == nil { + return nil + } + out := new(IdentityProvider) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ImageConfig) DeepCopyInto(out *ImageConfig) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ImageConfig. +func (in *ImageConfig) DeepCopy() *ImageConfig { + if in == nil { + return nil + } + out := new(ImageConfig) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ImagePolicyConfig) DeepCopyInto(out *ImagePolicyConfig) { + *out = *in + if in.AllowedRegistriesForImport != nil { + in, out := &in.AllowedRegistriesForImport, &out.AllowedRegistriesForImport + *out = new(AllowedRegistries) + if **in != nil { + in, out := *in, *out + *out = make([]RegistryLocation, len(*in)) + copy(*out, *in) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ImagePolicyConfig. +func (in *ImagePolicyConfig) DeepCopy() *ImagePolicyConfig { + if in == nil { + return nil + } + out := new(ImagePolicyConfig) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *JenkinsPipelineConfig) DeepCopyInto(out *JenkinsPipelineConfig) { + *out = *in + if in.AutoProvisionEnabled != nil { + in, out := &in.AutoProvisionEnabled, &out.AutoProvisionEnabled + *out = new(bool) + **out = **in + } + if in.Parameters != nil { + in, out := &in.Parameters, &out.Parameters + *out = make(map[string]string, len(*in)) + for key, val := range *in { + (*out)[key] = val + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new JenkinsPipelineConfig. +func (in *JenkinsPipelineConfig) DeepCopy() *JenkinsPipelineConfig { + if in == nil { + return nil + } + out := new(JenkinsPipelineConfig) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *KeystonePasswordIdentityProvider) DeepCopyInto(out *KeystonePasswordIdentityProvider) { + *out = *in + out.TypeMeta = in.TypeMeta + out.RemoteConnectionInfo = in.RemoteConnectionInfo + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new KeystonePasswordIdentityProvider. +func (in *KeystonePasswordIdentityProvider) DeepCopy() *KeystonePasswordIdentityProvider { + if in == nil { + return nil + } + out := new(KeystonePasswordIdentityProvider) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *KeystonePasswordIdentityProvider) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *KubeletConnectionInfo) DeepCopyInto(out *KubeletConnectionInfo) { + *out = *in + out.CertInfo = in.CertInfo + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new KubeletConnectionInfo. 
+func (in *KubeletConnectionInfo) DeepCopy() *KubeletConnectionInfo { + if in == nil { + return nil + } + out := new(KubeletConnectionInfo) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *KubernetesMasterConfig) DeepCopyInto(out *KubernetesMasterConfig) { + *out = *in + if in.APILevels != nil { + in, out := &in.APILevels, &out.APILevels + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.DisabledAPIGroupVersions != nil { + in, out := &in.DisabledAPIGroupVersions, &out.DisabledAPIGroupVersions + *out = make(map[string][]string, len(*in)) + for key, val := range *in { + var outVal []string + if val == nil { + (*out)[key] = nil + } else { + in, out := &val, &outVal + *out = make([]string, len(*in)) + copy(*out, *in) + } + (*out)[key] = outVal + } + } + out.ProxyClientInfo = in.ProxyClientInfo + if in.APIServerArguments != nil { + in, out := &in.APIServerArguments, &out.APIServerArguments + *out = make(ExtendedArguments, len(*in)) + for key, val := range *in { + var outVal []string + if val == nil { + (*out)[key] = nil + } else { + in, out := &val, &outVal + *out = make([]string, len(*in)) + copy(*out, *in) + } + (*out)[key] = outVal + } + } + if in.ControllerArguments != nil { + in, out := &in.ControllerArguments, &out.ControllerArguments + *out = make(ExtendedArguments, len(*in)) + for key, val := range *in { + var outVal []string + if val == nil { + (*out)[key] = nil + } else { + in, out := &val, &outVal + *out = make([]string, len(*in)) + copy(*out, *in) + } + (*out)[key] = outVal + } + } + if in.SchedulerArguments != nil { + in, out := &in.SchedulerArguments, &out.SchedulerArguments + *out = make(ExtendedArguments, len(*in)) + for key, val := range *in { + var outVal []string + if val == nil { + (*out)[key] = nil + } else { + in, out := &val, &outVal + *out = make([]string, len(*in)) + copy(*out, *in) + } + (*out)[key] = outVal + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new KubernetesMasterConfig. +func (in *KubernetesMasterConfig) DeepCopy() *KubernetesMasterConfig { + if in == nil { + return nil + } + out := new(KubernetesMasterConfig) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *LDAPAttributeMapping) DeepCopyInto(out *LDAPAttributeMapping) { + *out = *in + if in.ID != nil { + in, out := &in.ID, &out.ID + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.PreferredUsername != nil { + in, out := &in.PreferredUsername, &out.PreferredUsername + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.Email != nil { + in, out := &in.Email, &out.Email + *out = make([]string, len(*in)) + copy(*out, *in) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LDAPAttributeMapping. +func (in *LDAPAttributeMapping) DeepCopy() *LDAPAttributeMapping { + if in == nil { + return nil + } + out := new(LDAPAttributeMapping) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *LDAPPasswordIdentityProvider) DeepCopyInto(out *LDAPPasswordIdentityProvider) { + *out = *in + out.TypeMeta = in.TypeMeta + out.BindPassword = in.BindPassword + in.Attributes.DeepCopyInto(&out.Attributes) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LDAPPasswordIdentityProvider. +func (in *LDAPPasswordIdentityProvider) DeepCopy() *LDAPPasswordIdentityProvider { + if in == nil { + return nil + } + out := new(LDAPPasswordIdentityProvider) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *LDAPPasswordIdentityProvider) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *LDAPQuery) DeepCopyInto(out *LDAPQuery) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LDAPQuery. +func (in *LDAPQuery) DeepCopy() *LDAPQuery { + if in == nil { + return nil + } + out := new(LDAPQuery) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *LDAPSyncConfig) DeepCopyInto(out *LDAPSyncConfig) { + *out = *in + out.TypeMeta = in.TypeMeta + out.BindPassword = in.BindPassword + if in.LDAPGroupUIDToOpenShiftGroupNameMapping != nil { + in, out := &in.LDAPGroupUIDToOpenShiftGroupNameMapping, &out.LDAPGroupUIDToOpenShiftGroupNameMapping + *out = make(map[string]string, len(*in)) + for key, val := range *in { + (*out)[key] = val + } + } + if in.RFC2307Config != nil { + in, out := &in.RFC2307Config, &out.RFC2307Config + *out = new(RFC2307Config) + (*in).DeepCopyInto(*out) + } + if in.ActiveDirectoryConfig != nil { + in, out := &in.ActiveDirectoryConfig, &out.ActiveDirectoryConfig + *out = new(ActiveDirectoryConfig) + (*in).DeepCopyInto(*out) + } + if in.AugmentedActiveDirectoryConfig != nil { + in, out := &in.AugmentedActiveDirectoryConfig, &out.AugmentedActiveDirectoryConfig + *out = new(AugmentedActiveDirectoryConfig) + (*in).DeepCopyInto(*out) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LDAPSyncConfig. +func (in *LDAPSyncConfig) DeepCopy() *LDAPSyncConfig { + if in == nil { + return nil + } + out := new(LDAPSyncConfig) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *LDAPSyncConfig) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *LocalQuota) DeepCopyInto(out *LocalQuota) { + *out = *in + if in.PerFSGroup != nil { + in, out := &in.PerFSGroup, &out.PerFSGroup + x := (*in).DeepCopy() + *out = &x + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LocalQuota. +func (in *LocalQuota) DeepCopy() *LocalQuota { + if in == nil { + return nil + } + out := new(LocalQuota) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *MasterAuthConfig) DeepCopyInto(out *MasterAuthConfig) { + *out = *in + if in.RequestHeader != nil { + in, out := &in.RequestHeader, &out.RequestHeader + *out = new(RequestHeaderAuthenticationOptions) + (*in).DeepCopyInto(*out) + } + if in.WebhookTokenAuthenticators != nil { + in, out := &in.WebhookTokenAuthenticators, &out.WebhookTokenAuthenticators + *out = make([]WebhookTokenAuthenticator, len(*in)) + copy(*out, *in) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MasterAuthConfig. +func (in *MasterAuthConfig) DeepCopy() *MasterAuthConfig { + if in == nil { + return nil + } + out := new(MasterAuthConfig) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *MasterClients) DeepCopyInto(out *MasterClients) { + *out = *in + if in.OpenShiftLoopbackClientConnectionOverrides != nil { + in, out := &in.OpenShiftLoopbackClientConnectionOverrides, &out.OpenShiftLoopbackClientConnectionOverrides + *out = new(ClientConnectionOverrides) + **out = **in + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MasterClients. +func (in *MasterClients) DeepCopy() *MasterClients { + if in == nil { + return nil + } + out := new(MasterClients) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *MasterConfig) DeepCopyInto(out *MasterConfig) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ServingInfo.DeepCopyInto(&out.ServingInfo) + in.AuthConfig.DeepCopyInto(&out.AuthConfig) + out.AggregatorConfig = in.AggregatorConfig + if in.CORSAllowedOrigins != nil { + in, out := &in.CORSAllowedOrigins, &out.CORSAllowedOrigins + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.APILevels != nil { + in, out := &in.APILevels, &out.APILevels + *out = make([]string, len(*in)) + copy(*out, *in) + } + in.AdmissionConfig.DeepCopyInto(&out.AdmissionConfig) + in.ControllerConfig.DeepCopyInto(&out.ControllerConfig) + out.EtcdStorageConfig = in.EtcdStorageConfig + in.EtcdClientInfo.DeepCopyInto(&out.EtcdClientInfo) + out.KubeletClientInfo = in.KubeletClientInfo + in.KubernetesMasterConfig.DeepCopyInto(&out.KubernetesMasterConfig) + if in.EtcdConfig != nil { + in, out := &in.EtcdConfig, &out.EtcdConfig + *out = new(EtcdConfig) + (*in).DeepCopyInto(*out) + } + if in.OAuthConfig != nil { + in, out := &in.OAuthConfig, &out.OAuthConfig + *out = new(OAuthConfig) + (*in).DeepCopyInto(*out) + } + if in.DNSConfig != nil { + in, out := &in.DNSConfig, &out.DNSConfig + *out = new(DNSConfig) + **out = **in + } + in.ServiceAccountConfig.DeepCopyInto(&out.ServiceAccountConfig) + in.MasterClients.DeepCopyInto(&out.MasterClients) + out.ImageConfig = in.ImageConfig + in.ImagePolicyConfig.DeepCopyInto(&out.ImagePolicyConfig) + in.PolicyConfig.DeepCopyInto(&out.PolicyConfig) + in.ProjectConfig.DeepCopyInto(&out.ProjectConfig) + out.RoutingConfig = in.RoutingConfig + in.NetworkConfig.DeepCopyInto(&out.NetworkConfig) + in.VolumeConfig.DeepCopyInto(&out.VolumeConfig) + in.JenkinsPipelineConfig.DeepCopyInto(&out.JenkinsPipelineConfig) + in.AuditConfig.DeepCopyInto(&out.AuditConfig) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MasterConfig. 
+func (in *MasterConfig) DeepCopy() *MasterConfig { + if in == nil { + return nil + } + out := new(MasterConfig) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *MasterConfig) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *MasterNetworkConfig) DeepCopyInto(out *MasterNetworkConfig) { + *out = *in + if in.ClusterNetworks != nil { + in, out := &in.ClusterNetworks, &out.ClusterNetworks + *out = make([]ClusterNetworkEntry, len(*in)) + copy(*out, *in) + } + if in.ExternalIPNetworkCIDRs != nil { + in, out := &in.ExternalIPNetworkCIDRs, &out.ExternalIPNetworkCIDRs + *out = make([]string, len(*in)) + copy(*out, *in) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MasterNetworkConfig. +func (in *MasterNetworkConfig) DeepCopy() *MasterNetworkConfig { + if in == nil { + return nil + } + out := new(MasterNetworkConfig) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *MasterVolumeConfig) DeepCopyInto(out *MasterVolumeConfig) { + *out = *in + if in.DynamicProvisioningEnabled != nil { + in, out := &in.DynamicProvisioningEnabled, &out.DynamicProvisioningEnabled + *out = new(bool) + **out = **in + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MasterVolumeConfig. +func (in *MasterVolumeConfig) DeepCopy() *MasterVolumeConfig { + if in == nil { + return nil + } + out := new(MasterVolumeConfig) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *NamedCertificate) DeepCopyInto(out *NamedCertificate) { + *out = *in + if in.Names != nil { + in, out := &in.Names, &out.Names + *out = make([]string, len(*in)) + copy(*out, *in) + } + out.CertInfo = in.CertInfo + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NamedCertificate. +func (in *NamedCertificate) DeepCopy() *NamedCertificate { + if in == nil { + return nil + } + out := new(NamedCertificate) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *NodeAuthConfig) DeepCopyInto(out *NodeAuthConfig) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NodeAuthConfig. +func (in *NodeAuthConfig) DeepCopy() *NodeAuthConfig { + if in == nil { + return nil + } + out := new(NodeAuthConfig) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *NodeConfig) DeepCopyInto(out *NodeConfig) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ServingInfo.DeepCopyInto(&out.ServingInfo) + if in.MasterClientConnectionOverrides != nil { + in, out := &in.MasterClientConnectionOverrides, &out.MasterClientConnectionOverrides + *out = new(ClientConnectionOverrides) + **out = **in + } + if in.DNSNameservers != nil { + in, out := &in.DNSNameservers, &out.DNSNameservers + *out = make([]string, len(*in)) + copy(*out, *in) + } + out.NetworkConfig = in.NetworkConfig + out.ImageConfig = in.ImageConfig + if in.PodManifestConfig != nil { + in, out := &in.PodManifestConfig, &out.PodManifestConfig + *out = new(PodManifestConfig) + **out = **in + } + out.AuthConfig = in.AuthConfig + out.DockerConfig = in.DockerConfig + if in.KubeletArguments != nil { + in, out := &in.KubeletArguments, &out.KubeletArguments + *out = make(ExtendedArguments, len(*in)) + for key, val := range *in { + var outVal []string + if val == nil { + (*out)[key] = nil + } else { + in, out := &val, &outVal + *out = make([]string, len(*in)) + copy(*out, *in) + } + (*out)[key] = outVal + } + } + if in.ProxyArguments != nil { + in, out := &in.ProxyArguments, &out.ProxyArguments + *out = make(ExtendedArguments, len(*in)) + for key, val := range *in { + var outVal []string + if val == nil { + (*out)[key] = nil + } else { + in, out := &val, &outVal + *out = make([]string, len(*in)) + copy(*out, *in) + } + (*out)[key] = outVal + } + } + if in.EnableUnidling != nil { + in, out := &in.EnableUnidling, &out.EnableUnidling + *out = new(bool) + **out = **in + } + in.VolumeConfig.DeepCopyInto(&out.VolumeConfig) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NodeConfig. +func (in *NodeConfig) DeepCopy() *NodeConfig { + if in == nil { + return nil + } + out := new(NodeConfig) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *NodeConfig) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *NodeNetworkConfig) DeepCopyInto(out *NodeNetworkConfig) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NodeNetworkConfig. +func (in *NodeNetworkConfig) DeepCopy() *NodeNetworkConfig { + if in == nil { + return nil + } + out := new(NodeNetworkConfig) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *NodeVolumeConfig) DeepCopyInto(out *NodeVolumeConfig) { + *out = *in + in.LocalQuota.DeepCopyInto(&out.LocalQuota) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NodeVolumeConfig. +func (in *NodeVolumeConfig) DeepCopy() *NodeVolumeConfig { + if in == nil { + return nil + } + out := new(NodeVolumeConfig) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *OAuthConfig) DeepCopyInto(out *OAuthConfig) { + *out = *in + if in.MasterCA != nil { + in, out := &in.MasterCA, &out.MasterCA + *out = new(string) + **out = **in + } + if in.IdentityProviders != nil { + in, out := &in.IdentityProviders, &out.IdentityProviders + *out = make([]IdentityProvider, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + out.GrantConfig = in.GrantConfig + if in.SessionConfig != nil { + in, out := &in.SessionConfig, &out.SessionConfig + *out = new(SessionConfig) + **out = **in + } + in.TokenConfig.DeepCopyInto(&out.TokenConfig) + if in.Templates != nil { + in, out := &in.Templates, &out.Templates + *out = new(OAuthTemplates) + **out = **in + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OAuthConfig. +func (in *OAuthConfig) DeepCopy() *OAuthConfig { + if in == nil { + return nil + } + out := new(OAuthConfig) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *OAuthTemplates) DeepCopyInto(out *OAuthTemplates) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OAuthTemplates. +func (in *OAuthTemplates) DeepCopy() *OAuthTemplates { + if in == nil { + return nil + } + out := new(OAuthTemplates) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *OpenIDClaims) DeepCopyInto(out *OpenIDClaims) { + *out = *in + if in.ID != nil { + in, out := &in.ID, &out.ID + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.PreferredUsername != nil { + in, out := &in.PreferredUsername, &out.PreferredUsername + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.Email != nil { + in, out := &in.Email, &out.Email + *out = make([]string, len(*in)) + copy(*out, *in) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OpenIDClaims. +func (in *OpenIDClaims) DeepCopy() *OpenIDClaims { + if in == nil { + return nil + } + out := new(OpenIDClaims) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *OpenIDIdentityProvider) DeepCopyInto(out *OpenIDIdentityProvider) { + *out = *in + out.TypeMeta = in.TypeMeta + out.ClientSecret = in.ClientSecret + if in.ExtraScopes != nil { + in, out := &in.ExtraScopes, &out.ExtraScopes + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.ExtraAuthorizeParameters != nil { + in, out := &in.ExtraAuthorizeParameters, &out.ExtraAuthorizeParameters + *out = make(map[string]string, len(*in)) + for key, val := range *in { + (*out)[key] = val + } + } + out.URLs = in.URLs + in.Claims.DeepCopyInto(&out.Claims) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OpenIDIdentityProvider. +func (in *OpenIDIdentityProvider) DeepCopy() *OpenIDIdentityProvider { + if in == nil { + return nil + } + out := new(OpenIDIdentityProvider) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. 
+func (in *OpenIDIdentityProvider) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *OpenIDURLs) DeepCopyInto(out *OpenIDURLs) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OpenIDURLs. +func (in *OpenIDURLs) DeepCopy() *OpenIDURLs { + if in == nil { + return nil + } + out := new(OpenIDURLs) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PodManifestConfig) DeepCopyInto(out *PodManifestConfig) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PodManifestConfig. +func (in *PodManifestConfig) DeepCopy() *PodManifestConfig { + if in == nil { + return nil + } + out := new(PodManifestConfig) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PolicyConfig) DeepCopyInto(out *PolicyConfig) { + *out = *in + in.UserAgentMatchingConfig.DeepCopyInto(&out.UserAgentMatchingConfig) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PolicyConfig. +func (in *PolicyConfig) DeepCopy() *PolicyConfig { + if in == nil { + return nil + } + out := new(PolicyConfig) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ProjectConfig) DeepCopyInto(out *ProjectConfig) { + *out = *in + if in.SecurityAllocator != nil { + in, out := &in.SecurityAllocator, &out.SecurityAllocator + *out = new(SecurityAllocator) + **out = **in + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ProjectConfig. +func (in *ProjectConfig) DeepCopy() *ProjectConfig { + if in == nil { + return nil + } + out := new(ProjectConfig) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *RFC2307Config) DeepCopyInto(out *RFC2307Config) { + *out = *in + out.AllGroupsQuery = in.AllGroupsQuery + if in.GroupNameAttributes != nil { + in, out := &in.GroupNameAttributes, &out.GroupNameAttributes + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.GroupMembershipAttributes != nil { + in, out := &in.GroupMembershipAttributes, &out.GroupMembershipAttributes + *out = make([]string, len(*in)) + copy(*out, *in) + } + out.AllUsersQuery = in.AllUsersQuery + if in.UserNameAttributes != nil { + in, out := &in.UserNameAttributes, &out.UserNameAttributes + *out = make([]string, len(*in)) + copy(*out, *in) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RFC2307Config. +func (in *RFC2307Config) DeepCopy() *RFC2307Config { + if in == nil { + return nil + } + out := new(RFC2307Config) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *RegistryLocation) DeepCopyInto(out *RegistryLocation) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RegistryLocation. +func (in *RegistryLocation) DeepCopy() *RegistryLocation { + if in == nil { + return nil + } + out := new(RegistryLocation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *RemoteConnectionInfo) DeepCopyInto(out *RemoteConnectionInfo) { + *out = *in + out.CertInfo = in.CertInfo + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RemoteConnectionInfo. +func (in *RemoteConnectionInfo) DeepCopy() *RemoteConnectionInfo { + if in == nil { + return nil + } + out := new(RemoteConnectionInfo) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *RequestHeaderAuthenticationOptions) DeepCopyInto(out *RequestHeaderAuthenticationOptions) { + *out = *in + if in.ClientCommonNames != nil { + in, out := &in.ClientCommonNames, &out.ClientCommonNames + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.UsernameHeaders != nil { + in, out := &in.UsernameHeaders, &out.UsernameHeaders + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.GroupHeaders != nil { + in, out := &in.GroupHeaders, &out.GroupHeaders + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.ExtraHeaderPrefixes != nil { + in, out := &in.ExtraHeaderPrefixes, &out.ExtraHeaderPrefixes + *out = make([]string, len(*in)) + copy(*out, *in) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RequestHeaderAuthenticationOptions. +func (in *RequestHeaderAuthenticationOptions) DeepCopy() *RequestHeaderAuthenticationOptions { + if in == nil { + return nil + } + out := new(RequestHeaderAuthenticationOptions) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *RequestHeaderIdentityProvider) DeepCopyInto(out *RequestHeaderIdentityProvider) { + *out = *in + out.TypeMeta = in.TypeMeta + if in.ClientCommonNames != nil { + in, out := &in.ClientCommonNames, &out.ClientCommonNames + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.Headers != nil { + in, out := &in.Headers, &out.Headers + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.PreferredUsernameHeaders != nil { + in, out := &in.PreferredUsernameHeaders, &out.PreferredUsernameHeaders + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.NameHeaders != nil { + in, out := &in.NameHeaders, &out.NameHeaders + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.EmailHeaders != nil { + in, out := &in.EmailHeaders, &out.EmailHeaders + *out = make([]string, len(*in)) + copy(*out, *in) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RequestHeaderIdentityProvider. +func (in *RequestHeaderIdentityProvider) DeepCopy() *RequestHeaderIdentityProvider { + if in == nil { + return nil + } + out := new(RequestHeaderIdentityProvider) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. 
+func (in *RequestHeaderIdentityProvider) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *RoutingConfig) DeepCopyInto(out *RoutingConfig) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RoutingConfig. +func (in *RoutingConfig) DeepCopy() *RoutingConfig { + if in == nil { + return nil + } + out := new(RoutingConfig) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SecurityAllocator) DeepCopyInto(out *SecurityAllocator) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SecurityAllocator. +func (in *SecurityAllocator) DeepCopy() *SecurityAllocator { + if in == nil { + return nil + } + out := new(SecurityAllocator) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ServiceAccountConfig) DeepCopyInto(out *ServiceAccountConfig) { + *out = *in + if in.ManagedNames != nil { + in, out := &in.ManagedNames, &out.ManagedNames + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.PublicKeyFiles != nil { + in, out := &in.PublicKeyFiles, &out.PublicKeyFiles + *out = make([]string, len(*in)) + copy(*out, *in) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ServiceAccountConfig. +func (in *ServiceAccountConfig) DeepCopy() *ServiceAccountConfig { + if in == nil { + return nil + } + out := new(ServiceAccountConfig) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ServiceServingCert) DeepCopyInto(out *ServiceServingCert) { + *out = *in + if in.Signer != nil { + in, out := &in.Signer, &out.Signer + *out = new(CertInfo) + **out = **in + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ServiceServingCert. +func (in *ServiceServingCert) DeepCopy() *ServiceServingCert { + if in == nil { + return nil + } + out := new(ServiceServingCert) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ServingInfo) DeepCopyInto(out *ServingInfo) { + *out = *in + out.CertInfo = in.CertInfo + if in.NamedCertificates != nil { + in, out := &in.NamedCertificates, &out.NamedCertificates + *out = make([]NamedCertificate, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.CipherSuites != nil { + in, out := &in.CipherSuites, &out.CipherSuites + *out = make([]string, len(*in)) + copy(*out, *in) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ServingInfo. +func (in *ServingInfo) DeepCopy() *ServingInfo { + if in == nil { + return nil + } + out := new(ServingInfo) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *SessionConfig) DeepCopyInto(out *SessionConfig) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SessionConfig. +func (in *SessionConfig) DeepCopy() *SessionConfig { + if in == nil { + return nil + } + out := new(SessionConfig) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SessionSecret) DeepCopyInto(out *SessionSecret) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SessionSecret. +func (in *SessionSecret) DeepCopy() *SessionSecret { + if in == nil { + return nil + } + out := new(SessionSecret) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SessionSecrets) DeepCopyInto(out *SessionSecrets) { + *out = *in + out.TypeMeta = in.TypeMeta + if in.Secrets != nil { + in, out := &in.Secrets, &out.Secrets + *out = make([]SessionSecret, len(*in)) + copy(*out, *in) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SessionSecrets. +func (in *SessionSecrets) DeepCopy() *SessionSecrets { + if in == nil { + return nil + } + out := new(SessionSecrets) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *SessionSecrets) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SourceStrategyDefaultsConfig) DeepCopyInto(out *SourceStrategyDefaultsConfig) { + *out = *in + if in.Incremental != nil { + in, out := &in.Incremental, &out.Incremental + *out = new(bool) + **out = **in + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SourceStrategyDefaultsConfig. +func (in *SourceStrategyDefaultsConfig) DeepCopy() *SourceStrategyDefaultsConfig { + if in == nil { + return nil + } + out := new(SourceStrategyDefaultsConfig) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *StringSource) DeepCopyInto(out *StringSource) { + *out = *in + out.StringSourceSpec = in.StringSourceSpec + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new StringSource. +func (in *StringSource) DeepCopy() *StringSource { + if in == nil { + return nil + } + out := new(StringSource) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *StringSourceSpec) DeepCopyInto(out *StringSourceSpec) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new StringSourceSpec. +func (in *StringSourceSpec) DeepCopy() *StringSourceSpec { + if in == nil { + return nil + } + out := new(StringSourceSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *TokenConfig) DeepCopyInto(out *TokenConfig) { + *out = *in + if in.AccessTokenInactivityTimeoutSeconds != nil { + in, out := &in.AccessTokenInactivityTimeoutSeconds, &out.AccessTokenInactivityTimeoutSeconds + *out = new(int32) + **out = **in + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TokenConfig. +func (in *TokenConfig) DeepCopy() *TokenConfig { + if in == nil { + return nil + } + out := new(TokenConfig) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *UserAgentDenyRule) DeepCopyInto(out *UserAgentDenyRule) { + *out = *in + in.UserAgentMatchRule.DeepCopyInto(&out.UserAgentMatchRule) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new UserAgentDenyRule. +func (in *UserAgentDenyRule) DeepCopy() *UserAgentDenyRule { + if in == nil { + return nil + } + out := new(UserAgentDenyRule) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *UserAgentMatchRule) DeepCopyInto(out *UserAgentMatchRule) { + *out = *in + if in.HTTPVerbs != nil { + in, out := &in.HTTPVerbs, &out.HTTPVerbs + *out = make([]string, len(*in)) + copy(*out, *in) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new UserAgentMatchRule. +func (in *UserAgentMatchRule) DeepCopy() *UserAgentMatchRule { + if in == nil { + return nil + } + out := new(UserAgentMatchRule) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *UserAgentMatchingConfig) DeepCopyInto(out *UserAgentMatchingConfig) { + *out = *in + if in.RequiredClients != nil { + in, out := &in.RequiredClients, &out.RequiredClients + *out = make([]UserAgentMatchRule, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.DeniedClients != nil { + in, out := &in.DeniedClients, &out.DeniedClients + *out = make([]UserAgentDenyRule, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new UserAgentMatchingConfig. +func (in *UserAgentMatchingConfig) DeepCopy() *UserAgentMatchingConfig { + if in == nil { + return nil + } + out := new(UserAgentMatchingConfig) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *WebhookTokenAuthenticator) DeepCopyInto(out *WebhookTokenAuthenticator) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WebhookTokenAuthenticator. 
+func (in *WebhookTokenAuthenticator) DeepCopy() *WebhookTokenAuthenticator { + if in == nil { + return nil + } + out := new(WebhookTokenAuthenticator) + in.DeepCopyInto(out) + return out +} diff --git a/vendor/github.com/openshift/api/legacyconfig/v1/zz_generated.swagger_doc_generated.go b/vendor/github.com/openshift/api/legacyconfig/v1/zz_generated.swagger_doc_generated.go new file mode 100644 index 0000000000000000000000000000000000000000..75ee2a42b1bfc1680fb78a2667916292f6b86bca --- /dev/null +++ b/vendor/github.com/openshift/api/legacyconfig/v1/zz_generated.swagger_doc_generated.go @@ -0,0 +1,977 @@ +package v1 + +// This file contains a collection of methods that can be used from go-restful to +// generate Swagger API documentation for its models. Please read this PR for more +// information on the implementation: https://github.com/emicklei/go-restful/pull/215 +// +// TODOs are ignored from the parser (e.g. TODO(andronat):... || TODO:...) if and only if +// they are on one line! For multiple line or blocks that you want to ignore use ---. +// Any context after a --- is ignored. +// +// Those methods can be generated by using hack/update-swagger-docs.sh + +// AUTO-GENERATED FUNCTIONS START HERE +var map_ActiveDirectoryConfig = map[string]string{ + "": "ActiveDirectoryConfig holds the necessary configuration options to define how an LDAP group sync interacts with an LDAP server using the Active Directory schema", + "usersQuery": "AllUsersQuery holds the template for an LDAP query that returns user entries.", + "userNameAttributes": "UserNameAttributes defines which attributes on an LDAP user entry will be interpreted as its OpenShift user name.", + "groupMembershipAttributes": "GroupMembershipAttributes defines which attributes on an LDAP user entry will be interpreted as the groups it is a member of", +} + +func (ActiveDirectoryConfig) SwaggerDoc() map[string]string { + return map_ActiveDirectoryConfig +} + +var map_AdmissionConfig = map[string]string{ + "": "AdmissionConfig holds the necessary configuration options for admission", + "pluginConfig": "PluginConfig allows specifying a configuration file per admission control plugin", + "pluginOrderOverride": "PluginOrderOverride is a list of admission control plugin names that will be installed on the master. Order is significant. If empty, a default list of plugins is used.", +} + +func (AdmissionConfig) SwaggerDoc() map[string]string { + return map_AdmissionConfig +} + +var map_AdmissionPluginConfig = map[string]string{ + "": "AdmissionPluginConfig holds the necessary configuration options for admission plugins", + "location": "Location is the path to a configuration file that contains the plugin's configuration", + "configuration": "Configuration is an embedded configuration object to be used as the plugin's configuration. 
If present, it will be used instead of the path to the configuration file.", +} + +func (AdmissionPluginConfig) SwaggerDoc() map[string]string { + return map_AdmissionPluginConfig +} + +var map_AggregatorConfig = map[string]string{ + "": "AggregatorConfig holds information required to make the aggregator function.", + "proxyClientInfo": "ProxyClientInfo specifies the client cert/key to use when proxying to aggregated API servers", +} + +func (AggregatorConfig) SwaggerDoc() map[string]string { + return map_AggregatorConfig +} + +var map_AllowAllPasswordIdentityProvider = map[string]string{ + "": "AllowAllPasswordIdentityProvider provides identities for users authenticating using non-empty passwords", +} + +func (AllowAllPasswordIdentityProvider) SwaggerDoc() map[string]string { + return map_AllowAllPasswordIdentityProvider +} + +var map_AuditConfig = map[string]string{ + "": "AuditConfig holds configuration for the audit capabilities", + "enabled": "If this flag is set, the audit log will be printed in the logs. The log contains the method, user, and requested URL.", + "auditFilePath": "All requests coming to the apiserver will be logged to this file.", + "maximumFileRetentionDays": "Maximum number of days to retain old log files based on the timestamp encoded in their filename.", + "maximumRetainedFiles": "Maximum number of old log files to retain.", + "maximumFileSizeMegabytes": "Maximum size in megabytes of the log file before it gets rotated. Defaults to 100MB.", + "policyFile": "PolicyFile is a path to the file that defines the audit policy configuration.", + "policyConfiguration": "PolicyConfiguration is an embedded policy configuration object to be used as the audit policy configuration. If present, it will be used instead of the path to the policy file.", + "logFormat": "Format of saved audits (legacy or json).", + "webHookKubeConfig": "Path to a .kubeconfig formatted file that defines the audit webhook configuration.", + "webHookMode": "Strategy for sending audit events (block or batch).", +} + +func (AuditConfig) SwaggerDoc() map[string]string { + return map_AuditConfig +} + +var map_AugmentedActiveDirectoryConfig = map[string]string{ + "": "AugmentedActiveDirectoryConfig holds the necessary configuration options to define how an LDAP group sync interacts with an LDAP server using the augmented Active Directory schema", + "usersQuery": "AllUsersQuery holds the template for an LDAP query that returns user entries.", + "userNameAttributes": "UserNameAttributes defines which attributes on an LDAP user entry will be interpreted as its OpenShift user name.", + "groupMembershipAttributes": "GroupMembershipAttributes defines which attributes on an LDAP user entry will be interpreted as the groups it is a member of", + "groupsQuery": "AllGroupsQuery holds the template for an LDAP query that returns group entries.", + "groupUIDAttribute": "GroupUIDAttributes defines which attribute on an LDAP group entry will be interpreted as its unique identifier. 
(ldapGroupUID)", + "groupNameAttributes": "GroupNameAttributes defines which attributes on an LDAP group entry will be interpreted as its name to use for an OpenShift group", +} + +func (AugmentedActiveDirectoryConfig) SwaggerDoc() map[string]string { + return map_AugmentedActiveDirectoryConfig +} + +var map_BasicAuthPasswordIdentityProvider = map[string]string{ + "": "BasicAuthPasswordIdentityProvider provides identities for users authenticating using HTTP basic auth credentials", +} + +func (BasicAuthPasswordIdentityProvider) SwaggerDoc() map[string]string { + return map_BasicAuthPasswordIdentityProvider +} + +var map_BuildDefaultsConfig = map[string]string{ + "": "BuildDefaultsConfig controls the default information for Builds", + "gitHTTPProxy": "gitHTTPProxy is the location of the HTTPProxy for Git source", + "gitHTTPSProxy": "gitHTTPSProxy is the location of the HTTPSProxy for Git source", + "gitNoProxy": "gitNoProxy is the list of domains for which the proxy should not be used", + "env": "env is a set of default environment variables that will be applied to the build if the specified variables do not exist on the build", + "sourceStrategyDefaults": "sourceStrategyDefaults are default values that apply to builds using the source strategy.", + "imageLabels": "imageLabels is a list of labels that are applied to the resulting image. User can override a default label by providing a label with the same name in their Build/BuildConfig.", + "nodeSelector": "nodeSelector is a selector which must be true for the build pod to fit on a node", + "annotations": "annotations are annotations that will be added to the build pod", + "resources": "resources defines resource requirements to execute the build.", +} + +func (BuildDefaultsConfig) SwaggerDoc() map[string]string { + return map_BuildDefaultsConfig +} + +var map_BuildOverridesConfig = map[string]string{ + "": "BuildOverridesConfig controls override settings for builds", + "forcePull": "forcePull indicates whether the build strategy should always be set to ForcePull=true", + "imageLabels": "imageLabels is a list of labels that are applied to the resulting image. If user provided a label in their Build/BuildConfig with the same name as one in this list, the user's label will be overwritten.", + "nodeSelector": "nodeSelector is a selector which must be true for the build pod to fit on a node", + "annotations": "annotations are annotations that will be added to the build pod", + "tolerations": "tolerations is a list of Tolerations that will override any existing tolerations set on a build pod.", +} + +func (BuildOverridesConfig) SwaggerDoc() map[string]string { + return map_BuildOverridesConfig +} + +var map_CertInfo = map[string]string{ + "": "CertInfo relates a certificate with a private key", + "certFile": "CertFile is a file containing a PEM-encoded certificate", + "keyFile": "KeyFile is a file containing a PEM-encoded private key for the certificate specified by CertFile", +} + +func (CertInfo) SwaggerDoc() map[string]string { + return map_CertInfo +} + +var map_ClientConnectionOverrides = map[string]string{ + "": "ClientConnectionOverrides are a set of overrides to the default client connection settings.", + "acceptContentTypes": "AcceptContentTypes defines the Accept header sent by clients when connecting to a server, overriding the default value of 'application/json'. 
This field will control all connections to the server used by a particular client.", + "contentType": "ContentType is the content type used when sending data to the server from this client.", + "qps": "QPS controls the number of queries per second allowed for this connection.", + "burst": "Burst allows extra queries to accumulate when a client is exceeding its rate.", +} + +func (ClientConnectionOverrides) SwaggerDoc() map[string]string { + return map_ClientConnectionOverrides +} + +var map_ClusterNetworkEntry = map[string]string{ + "": "ClusterNetworkEntry defines an individual cluster network. The CIDRs cannot overlap with other cluster network CIDRs, CIDRs reserved for external ips, CIDRs reserved for service networks, and CIDRs reserved for ingress ips.", + "cidr": "CIDR defines the total range of a cluster network's address space.", + "hostSubnetLength": "HostSubnetLength is the number of bits of the accompanying CIDR address to allocate to each node. e.g., 8 would mean that each node would have a /24 slice of the overlay network for its pod.", +} + +func (ClusterNetworkEntry) SwaggerDoc() map[string]string { + return map_ClusterNetworkEntry +} + +var map_ControllerConfig = map[string]string{ + "": "ControllerConfig holds configuration values for controllers", + "controllers": "Controllers is a list of controllers to enable. '*' enables all on-by-default controllers, 'foo' enables the controller named 'foo', '-foo' disables the controller named 'foo'. Defaults to \"*\".", + "election": "Election defines the configuration for electing a controller instance to make changes to the cluster. If unspecified, the ControllerTTL value is checked to determine whether the legacy direct etcd election code will be used.", + "serviceServingCert": "ServiceServingCert holds configuration for service serving cert signer which creates cert/key pairs for pods fulfilling a service to serve with.", +} + +func (ControllerConfig) SwaggerDoc() map[string]string { + return map_ControllerConfig +} + +var map_ControllerElectionConfig = map[string]string{ + "": "ControllerElectionConfig contains configuration values for deciding how a controller will be elected to act as leader.", + "lockName": "LockName is the resource name used to act as the lock for determining which controller instance should lead.", + "lockNamespace": "LockNamespace is the resource namespace used to act as the lock for determining which controller instance should lead. It defaults to \"kube-system\"", + "lockResource": "LockResource is the group and resource name to use to coordinate for the controller lock. If unset, defaults to \"configmaps\".", +} + +func (ControllerElectionConfig) SwaggerDoc() map[string]string { + return map_ControllerElectionConfig +} + +var map_DNSConfig = map[string]string{ + "": "DNSConfig holds the necessary configuration options for DNS", + "bindAddress": "BindAddress is the ip:port to serve DNS on", + "bindNetwork": "BindNetwork is the type of network to bind to - defaults to \"tcp4\", accepts \"tcp\", \"tcp4\", and \"tcp6\"", + "allowRecursiveQueries": "AllowRecursiveQueries allows the DNS server on the master to answer queries recursively. Note that open resolvers can be used for DNS amplification attacks and the master DNS should not be made accessible to public networks.", +} + +func (DNSConfig) SwaggerDoc() map[string]string { + return map_DNSConfig +} + +var map_DefaultAdmissionConfig = map[string]string{ + "": "DefaultAdmissionConfig can be used to enable or disable various admission plugins. 
When this type is present as the `configuration` object under `pluginConfig` and *if* the admission plugin supports it, this will cause an \"off by default\" admission plugin to be enabled", + "disable": "Disable turns off an admission plugin that is enabled by default.", +} + +func (DefaultAdmissionConfig) SwaggerDoc() map[string]string { + return map_DefaultAdmissionConfig +} + +var map_DenyAllPasswordIdentityProvider = map[string]string{ + "": "DenyAllPasswordIdentityProvider provides no identities for users", +} + +func (DenyAllPasswordIdentityProvider) SwaggerDoc() map[string]string { + return map_DenyAllPasswordIdentityProvider +} + +var map_DockerConfig = map[string]string{ + "": "DockerConfig holds Docker related configuration options.", + "execHandlerName": "ExecHandlerName is the name of the handler to use for executing commands in containers.", + "dockerShimSocket": "DockerShimSocket is the location of the dockershim socket the kubelet uses. Currently unix socket is supported on Linux, and tcp is supported on Windows. Examples: 'unix:///var/run/dockershim.sock', 'tcp://localhost:3735'", + "dockerShimRootDirectory": "DockershimRootDirectory is the dockershim root directory.", +} + +func (DockerConfig) SwaggerDoc() map[string]string { + return map_DockerConfig +} + +var map_EtcdConfig = map[string]string{ + "": "EtcdConfig holds the necessary configuration options for connecting with an etcd database", + "servingInfo": "ServingInfo describes how to start serving the etcd master", + "address": "Address is the advertised host:port for client connections to etcd", + "peerServingInfo": "PeerServingInfo describes how to start serving the etcd peer", + "peerAddress": "PeerAddress is the advertised host:port for peer connections to etcd", + "storageDirectory": "StorageDir is the path to the etcd storage directory", +} + +func (EtcdConfig) SwaggerDoc() map[string]string { + return map_EtcdConfig +} + +var map_EtcdConnectionInfo = map[string]string{ + "": "EtcdConnectionInfo holds information necessary for connecting to an etcd server", + "urls": "URLs are the URLs for etcd", + "ca": "CA is a file containing trusted roots for the etcd server certificates", +} + +func (EtcdConnectionInfo) SwaggerDoc() map[string]string { + return map_EtcdConnectionInfo +} + +var map_EtcdStorageConfig = map[string]string{ + "": "EtcdStorageConfig holds the necessary configuration options for the etcd storage underlying OpenShift and Kubernetes", + "kubernetesStorageVersion": "KubernetesStorageVersion is the API version that Kube resources in etcd should be serialized to. This value should *not* be advanced until all clients in the cluster that read from etcd have code that allows them to read the new version.", + "kubernetesStoragePrefix": "KubernetesStoragePrefix is the path within etcd that the Kubernetes resources will be rooted under. This value, if changed, will mean existing objects in etcd will no longer be located. The default value is 'kubernetes.io'.", + "openShiftStorageVersion": "OpenShiftStorageVersion is the API version that OS resources in etcd should be serialized to. This value should *not* be advanced until all clients in the cluster that read from etcd have code that allows them to read the new version.", + "openShiftStoragePrefix": "OpenShiftStoragePrefix is the path within etcd that the OpenShift resources will be rooted under. This value, if changed, will mean existing objects in etcd will no longer be located. 
The default value is 'openshift.io'.", +} + +func (EtcdStorageConfig) SwaggerDoc() map[string]string { + return map_EtcdStorageConfig +} + +var map_GitHubIdentityProvider = map[string]string{ + "": "GitHubIdentityProvider provides identities for users authenticating using GitHub credentials", + "clientID": "ClientID is the oauth client ID", + "clientSecret": "ClientSecret is the oauth client secret", + "organizations": "Organizations optionally restricts which organizations are allowed to log in", + "teams": "Teams optionally restricts which teams are allowed to log in. Format is <org>/<team>.", + "hostname": "Hostname is the optional domain (e.g. \"mycompany.com\") for use with a hosted instance of GitHub Enterprise. It must match the GitHub Enterprise settings value that is configured at /setup/settings#hostname.", + "ca": "CA is the optional trusted certificate authority bundle to use when making requests to the server. If empty, the default system roots are used. This can only be configured when hostname is set to a non-empty value.", +} + +func (GitHubIdentityProvider) SwaggerDoc() map[string]string { + return map_GitHubIdentityProvider +} + +var map_GitLabIdentityProvider = map[string]string{ + "": "GitLabIdentityProvider provides identities for users authenticating using GitLab credentials", + "ca": "CA is the optional trusted certificate authority bundle to use when making requests to the server. If empty, the default system roots are used", + "url": "URL is the oauth server base URL", + "clientID": "ClientID is the oauth client ID", + "clientSecret": "ClientSecret is the oauth client secret", + "legacy": "Legacy determines if OAuth2 or OIDC should be used. If true, OAuth2 is used. If false, OIDC is used. If nil and the URL's host is gitlab.com, OIDC is used. Otherwise, OAuth2 is used. In a future release, nil will default to using OIDC. Eventually this flag will be removed and only OIDC will be used", +} + +func (GitLabIdentityProvider) SwaggerDoc() map[string]string { + return map_GitLabIdentityProvider +} + +var map_GoogleIdentityProvider = map[string]string{ + "": "GoogleIdentityProvider provides identities for users authenticating using Google credentials", + "clientID": "ClientID is the oauth client ID", + "clientSecret": "ClientSecret is the oauth client secret", + "hostedDomain": "HostedDomain is the optional Google App domain (e.g. \"mycompany.com\") to restrict logins to", +} + +func (GoogleIdentityProvider) SwaggerDoc() map[string]string { + return map_GoogleIdentityProvider +} + +var map_GrantConfig = map[string]string{ + "": "GrantConfig holds the necessary configuration options for grant handlers", + "method": "Method determines the default strategy to use when an OAuth client requests a grant. This method will be used only if the specific OAuth client doesn't provide a strategy of its own. Valid grant handling methods are:\n - auto: always approves grant requests, useful for trusted clients\n - prompt: prompts the end user for approval of grant requests, useful for third-party clients\n - deny: always denies grant requests, useful for black-listed clients", + "serviceAccountMethod": "ServiceAccountMethod is used for determining client authorization for service account oauth client. 
It must be either: deny, prompt", +} + +func (GrantConfig) SwaggerDoc() map[string]string { + return map_GrantConfig +} + +var map_GroupResource = map[string]string{ + "": "GroupResource points to a resource by its name and API group.", + "group": "Group is the name of an API group", + "resource": "Resource is the name of a resource.", +} + +func (GroupResource) SwaggerDoc() map[string]string { + return map_GroupResource +} + +var map_HTPasswdPasswordIdentityProvider = map[string]string{ + "": "HTPasswdPasswordIdentityProvider provides identities for users authenticating using htpasswd credentials", + "file": "File is a reference to your htpasswd file", +} + +func (HTPasswdPasswordIdentityProvider) SwaggerDoc() map[string]string { + return map_HTPasswdPasswordIdentityProvider +} + +var map_HTTPServingInfo = map[string]string{ + "": "HTTPServingInfo holds configuration for serving HTTP", + "maxRequestsInFlight": "MaxRequestsInFlight is the number of concurrent requests allowed to the server. If zero, no limit.", + "requestTimeoutSeconds": "RequestTimeoutSeconds is the number of seconds before requests are timed out. The default is 60 minutes; if -1, there is no limit on requests.", +} + +func (HTTPServingInfo) SwaggerDoc() map[string]string { + return map_HTTPServingInfo +} + +var map_IdentityProvider = map[string]string{ + "": "IdentityProvider provides identities for users authenticating using credentials", + "name": "Name is used to qualify the identities returned by this provider", + "challenge": "UseAsChallenger indicates whether to issue WWW-Authenticate challenges for this provider", + "login": "UseAsLogin indicates whether to use this identity provider for unauthenticated browsers to log in against", + "mappingMethod": "MappingMethod determines how identities from this provider are mapped to users", + "provider": "Provider contains the information about how to set up a specific identity provider", +} + +func (IdentityProvider) SwaggerDoc() map[string]string { + return map_IdentityProvider +} + +var map_ImageConfig = map[string]string{ + "": "ImageConfig holds the necessary configuration options for building image names for system components", + "format": "Format is the format of the name to be built for the system component", + "latest": "Latest determines if the latest tag will be pulled from the registry", +} + +func (ImageConfig) SwaggerDoc() map[string]string { + return map_ImageConfig +} + +var map_ImagePolicyConfig = map[string]string{ + "": "ImagePolicyConfig holds the necessary configuration options for limits and behavior for importing images", + "maxImagesBulkImportedPerRepository": "MaxImagesBulkImportedPerRepository controls the number of images that are imported when a user does a bulk import of a container repository. This number defaults to 50 to prevent users from importing large numbers of images accidentally. Set -1 for no limit.", + "disableScheduledImport": "DisableScheduledImport allows scheduled background import of images to be disabled.", + "scheduledImageImportMinimumIntervalSeconds": "ScheduledImageImportMinimumIntervalSeconds is the minimum number of seconds that can elapse between when image streams scheduled for background import are checked against the upstream repository. The default value is 15 minutes.", + "maxScheduledImageImportsPerMinute": "MaxScheduledImageImportsPerMinute is the maximum number of scheduled image streams that will be imported in the background per minute. The default value is 60. 
Set to -1 for unlimited.", + "allowedRegistriesForImport": "AllowedRegistriesForImport limits the container image registries that normal users may import images from. Set this list to the registries that you trust to contain valid Docker images and that you want applications to be able to import from. Users with permission to create Images or ImageStreamMappings via the API are not affected by this policy - typically only administrators or system integrations will have those permissions.", + "internalRegistryHostname": "InternalRegistryHostname sets the hostname for the default internal image registry. The value must be in \"hostname[:port]\" format. For backward compatibility, users can still use OPENSHIFT_DEFAULT_REGISTRY environment variable but this setting overrides the environment variable.", + "externalRegistryHostname": "ExternalRegistryHostname sets the hostname for the default external image registry. The external hostname should be set only when the image registry is exposed externally. The value is used in 'publicDockerImageRepository' field in ImageStreams. The value must be in \"hostname[:port]\" format.", + "additionalTrustedCA": "AdditionalTrustedCA is a path to a pem bundle file containing additional CAs that should be trusted during imagestream import.", +} + +func (ImagePolicyConfig) SwaggerDoc() map[string]string { + return map_ImagePolicyConfig +} + +var map_JenkinsPipelineConfig = map[string]string{ + "": "JenkinsPipelineConfig holds configuration for the Jenkins pipeline strategy", + "autoProvisionEnabled": "AutoProvisionEnabled determines whether a Jenkins server will be spawned from the provided template when the first build config in the project with type JenkinsPipeline is created. When not specified this option defaults to true.", + "templateNamespace": "TemplateNamespace contains the namespace name where the Jenkins template is stored", + "templateName": "TemplateName is the name of the default Jenkins template", + "serviceName": "ServiceName is the name of the Jenkins service OpenShift uses to detect whether a Jenkins pipeline handler has already been installed in a project. 
This value *must* match a service name in the provided template.", + "parameters": "Parameters specifies a set of optional parameters to the Jenkins template.", +} + +func (JenkinsPipelineConfig) SwaggerDoc() map[string]string { + return map_JenkinsPipelineConfig +} + +var map_KeystonePasswordIdentityProvider = map[string]string{ + "": "KeystonePasswordIdentityProvider provides identities for users authenticating using keystone password credentials", + "domainName": "Domain Name is required for keystone v3", + "useKeystoneIdentity": "UseKeystoneIdentity flag indicates that the user should be authenticated by keystone ID, not by username", +} + +func (KeystonePasswordIdentityProvider) SwaggerDoc() map[string]string { + return map_KeystonePasswordIdentityProvider +} + +var map_KubeletConnectionInfo = map[string]string{ + "": "KubeletConnectionInfo holds information necessary for connecting to a kubelet", + "port": "Port is the port to connect to kubelets on", + "ca": "CA is the CA for verifying TLS connections to kubelets", +} + +func (KubeletConnectionInfo) SwaggerDoc() map[string]string { + return map_KubeletConnectionInfo +} + +var map_KubernetesMasterConfig = map[string]string{ + "": "KubernetesMasterConfig holds the necessary configuration options for the Kubernetes master", + "apiLevels": "APILevels is a list of API levels that should be enabled on startup, e.g. v1", + "disabledAPIGroupVersions": "DisabledAPIGroupVersions is a map of groups to the versions (or *) that should be disabled.", + "masterIP": "MasterIP is the public IP address of the Kubernetes master. If empty, the first result from net.InterfaceAddrs will be used.", + "masterEndpointReconcileTTL": "MasterEndpointReconcileTTL sets the time to live in seconds of an endpoint record recorded by each master. The endpoints are checked at an interval that is 2/3 of this value and this value defaults to 15s if unset. In very large clusters, this value may be increased to reduce the possibility that the master endpoint record expires (due to other load on the etcd server) and causes masters to drop in and out of the kubernetes service record. It is not recommended to set this value below 15s.", + "servicesSubnet": "ServicesSubnet is the subnet to use for assigning service IPs", + "servicesNodePortRange": "ServicesNodePortRange is the range to use for assigning service public ports on a host.", + "schedulerConfigFile": "SchedulerConfigFile points to a file that describes how to set up the scheduler. If empty, you get the default scheduling rules.", + "podEvictionTimeout": "PodEvictionTimeout controls the grace period for deleting pods on failed nodes. It takes a valid time duration string. If empty, you get the default pod eviction timeout.", + "proxyClientInfo": "ProxyClientInfo specifies the client cert/key to use when proxying to pods", + "apiServerArguments": "APIServerArguments are key value pairs that will be passed directly to the Kube apiserver that match the apiserver's command line arguments. These are not migrated, but if you reference a value that does not exist the server will not start. These values may override other settings in KubernetesMasterConfig which may cause invalid configurations.", + "controllerArguments": "ControllerArguments are key value pairs that will be passed directly to the Kube controller manager that match the controller manager's command line arguments. These are not migrated, but if you reference a value that does not exist the server will not start. 
These values may override other settings in KubernetesMasterConfig which may cause invalid configurations.", + "schedulerArguments": "SchedulerArguments are key value pairs that will be passed directly to the Kube scheduler that match the scheduler's command line arguments. These are not migrated, but if you reference a value that does not exist the server will not start. These values may override other settings in KubernetesMasterConfig which may cause invalid configurations.", +} + +func (KubernetesMasterConfig) SwaggerDoc() map[string]string { + return map_KubernetesMasterConfig +} + +var map_LDAPAttributeMapping = map[string]string{ + "": "LDAPAttributeMapping maps LDAP attributes to OpenShift identity fields", + "id": "ID is the list of attributes whose values should be used as the user ID. Required. LDAP standard identity attribute is \"dn\"", + "preferredUsername": "PreferredUsername is the list of attributes whose values should be used as the preferred username. LDAP standard login attribute is \"uid\"", + "name": "Name is the list of attributes whose values should be used as the display name. Optional. If unspecified, no display name is set for the identity. LDAP standard display name attribute is \"cn\"", + "email": "Email is the list of attributes whose values should be used as the email address. Optional. If unspecified, no email is set for the identity", +} + +func (LDAPAttributeMapping) SwaggerDoc() map[string]string { + return map_LDAPAttributeMapping +} + +var map_LDAPPasswordIdentityProvider = map[string]string{ + "": "LDAPPasswordIdentityProvider provides identities for users authenticating using LDAP credentials", + "url": "URL is an RFC 2255 URL which specifies the LDAP search parameters to use. The syntax of the URL is\n ldap://host:port/basedn?attribute?scope?filter", + "bindDN": "BindDN is an optional DN to bind with during the search phase.", + "bindPassword": "BindPassword is an optional password to bind with during the search phase.", + "insecure": "Insecure, if true, indicates the connection should not use TLS. Cannot be set to true with a URL scheme of \"ldaps://\". If false, \"ldaps://\" URLs connect using TLS, and \"ldap://\" URLs are upgraded to a TLS connection using StartTLS as specified in https://tools.ietf.org/html/rfc2830", + "ca": "CA is the optional trusted certificate authority bundle to use when making requests to the server. If empty, the default system roots are used", + "attributes": "Attributes maps LDAP attributes to identities", +} + +func (LDAPPasswordIdentityProvider) SwaggerDoc() map[string]string { + return map_LDAPPasswordIdentityProvider +} + +var map_LDAPQuery = map[string]string{ + "": "LDAPQuery holds the options necessary to build an LDAP query", + "baseDN": "The DN of the branch of the directory where all searches should start from", + "scope": "The (optional) scope of the search. Can be: base: only the base object, one: all objects on the base level, sub: the entire subtree. Defaults to the entire subtree if not set", + "derefAliases": "The (optional) behavior of the search with regard to aliases. Can be: never: never dereference aliases, search: only dereference in searching, base: only dereference in finding the base object, always: always dereference. Defaults to always dereferencing if not set", + "timeout": "TimeLimit holds the limit of time in seconds that any request to the server can remain outstanding before the wait for a response is given up. 
If this is 0, no client-side limit is imposed", + "filter": "Filter is a valid LDAP search filter that retrieves all relevant entries from the LDAP server with the base DN", + "pageSize": "PageSize is the maximum preferred page size, measured in LDAP entries. A page size of 0 means no paging will be done.", +} + +func (LDAPQuery) SwaggerDoc() map[string]string { + return map_LDAPQuery +} + +var map_LDAPSyncConfig = map[string]string{ + "": "LDAPSyncConfig holds the necessary configuration options to define an LDAP group sync", + "url": "Host is the scheme, host and port of the LDAP server to connect to: scheme://host:port", + "bindDN": "BindDN is an optional DN to bind to the LDAP server with", + "bindPassword": "BindPassword is an optional password to bind with during the search phase.", + "insecure": "Insecure, if true, indicates the connection should not use TLS. Cannot be set to true with a URL scheme of \"ldaps://\". If false, \"ldaps://\" URLs connect using TLS, and \"ldap://\" URLs are upgraded to a TLS connection using StartTLS as specified in https://tools.ietf.org/html/rfc2830", + "ca": "CA is the optional trusted certificate authority bundle to use when making requests to the server. If empty, the default system roots are used", + "groupUIDNameMapping": "LDAPGroupUIDToOpenShiftGroupNameMapping is an optional direct mapping of LDAP group UIDs to OpenShift Group names", + "rfc2307": "RFC2307Config holds the configuration for extracting data from an LDAP server set up in a fashion similar to RFC2307: first-class group and user entries, with group membership determined by a multi-valued attribute on the group entry listing its members", + "activeDirectory": "ActiveDirectoryConfig holds the configuration for extracting data from an LDAP server set up in a fashion similar to that used in Active Directory: first-class user entries, with group membership determined by a multi-valued attribute on members listing groups they are a member of", + "augmentedActiveDirectory": "AugmentedActiveDirectoryConfig holds the configuration for extracting data from an LDAP server set up in a fashion similar to that used in Active Directory as described above, with one addition: first-class group entries exist and are used to hold metadata but not group membership", +} + +func (LDAPSyncConfig) SwaggerDoc() map[string]string { + return map_LDAPSyncConfig +} + +var map_LocalQuota = map[string]string{ + "": "LocalQuota contains options for controlling local volume quota on the node.", + "perFSGroup": "FSGroup can be specified to enable a quota on local storage use per unique FSGroup ID. At present this is only implemented for emptyDir volumes, and if the underlying volumeDirectory is on an XFS filesystem.", +} + +func (LocalQuota) SwaggerDoc() map[string]string { + return map_LocalQuota +} + +var map_MasterAuthConfig = map[string]string{ + "": "MasterAuthConfig configures authentication options in addition to the standard oauth token and client certificate authenticators", + "requestHeader": "RequestHeader holds options for setting up a front proxy against the API. It is optional.", + "webhookTokenAuthenticators": "WebhookTokenAuthnConfig, if present, configures remote token reviewers", + "oauthMetadataFile": "OAuthMetadataFile is a path to a file containing the discovery endpoint for OAuth 2.0 Authorization Server Metadata for an external OAuth server. 
See IETF Draft: https://tools.ietf.org/html/draft-ietf-oauth-discovery-04#section-2. This option is mutually exclusive with OAuthConfig", +} + +func (MasterAuthConfig) SwaggerDoc() map[string]string { + return map_MasterAuthConfig +} + +var map_MasterClients = map[string]string{ + "": "MasterClients holds references to `.kubeconfig` files that qualify master clients for OpenShift and Kubernetes", + "openshiftLoopbackKubeConfig": "OpenShiftLoopbackKubeConfig is a .kubeconfig filename for system components to loopback to this master", + "openshiftLoopbackClientConnectionOverrides": "OpenShiftLoopbackClientConnectionOverrides specifies client overrides for system components to loop back to this master.", +} + +func (MasterClients) SwaggerDoc() map[string]string { + return map_MasterClients +} + +var map_MasterConfig = map[string]string{ + "": "MasterConfig holds the necessary configuration options for the OpenShift master", + "servingInfo": "ServingInfo describes how to start serving", + "authConfig": "AuthConfig configures authentication options in addition to the standard oauth token and client certificate authenticators", + "aggregatorConfig": "AggregatorConfig has options for configuring the aggregator component of the API server.", + "corsAllowedOrigins": "CORSAllowedOrigins", + "apiLevels": "APILevels is a list of API levels that should be enabled on startup, e.g. v1", + "masterPublicURL": "MasterPublicURL is how clients can access the OpenShift API server", + "controllers": "Controllers is a list of the controllers that should be started. If set to \"none\", no controllers will start automatically. The default value is \"*\" which will start all controllers. When using \"*\", you may exclude controllers by prepending a \"-\" in front of their name. No other values are recognized at this time.", + "admissionConfig": "AdmissionConfig contains admission control plugin configuration.", + "controllerConfig": "ControllerConfig holds configuration values for controllers", + "etcdStorageConfig": "EtcdStorageConfig contains information about how API resources are stored in Etcd. 
These values are only relevant when etcd is the backing store for the cluster.", + "etcdClientInfo": "EtcdClientInfo contains information about how to connect to etcd", + "kubeletClientInfo": "KubeletClientInfo contains information about how to connect to kubelets", + "kubernetesMasterConfig": "KubernetesMasterConfig, if present, starts the Kubernetes master in this process", + "etcdConfig": "EtcdConfig, if present, starts etcd in this process", + "oauthConfig": "OAuthConfig, if present, starts the /oauth endpoint in this process", + "dnsConfig": "DNSConfig, if present, starts the DNS server in this process", + "serviceAccountConfig": "ServiceAccountConfig holds options related to service accounts", + "masterClients": "MasterClients holds all the client connection information for controllers and other system components", + "imageConfig": "ImageConfig holds options that describe how to build image names for system components", + "imagePolicyConfig": "ImagePolicyConfig controls limits and behavior for importing images", + "policyConfig": "PolicyConfig holds information about where to locate critical pieces of bootstrapping policy", + "projectConfig": "ProjectConfig holds information about project creation and defaults", + "routingConfig": "RoutingConfig holds information about routing and route generation", + "networkConfig": "NetworkConfig to be passed to the compiled-in network plugin", + "volumeConfig": "MasterVolumeConfig contains options for configuring volume plugins in the master node.", + "jenkinsPipelineConfig": "JenkinsPipelineConfig holds information about the default Jenkins template used for JenkinsPipeline build strategy.", + "auditConfig": "AuditConfig holds information related to auditing capabilities.", +} + +func (MasterConfig) SwaggerDoc() map[string]string { + return map_MasterConfig +} + +var map_MasterNetworkConfig = map[string]string{ + "": "MasterNetworkConfig to be passed to the compiled-in network plugin", + "networkPluginName": "NetworkPluginName is the name of the network plugin to use", + "clusterNetworkCIDR": "ClusterNetworkCIDR is the CIDR string to specify the global overlay network's L3 space. Deprecated, but maintained for backwards compatibility, use ClusterNetworks instead.", + "clusterNetworks": "ClusterNetworks is a list of ClusterNetwork objects that defines the global overlay network's L3 space by specifying a set of CIDRs and netmasks that the SDN can allocate addresses from. If this is specified, then ClusterNetworkCIDR and HostSubnetLength may not be set.", + "hostSubnetLength": "HostSubnetLength is the number of bits to allocate to each host's subnet, e.g. 8 would mean a /24 network on the host. Deprecated, but maintained for backwards compatibility, use ClusterNetworks instead.", + "serviceNetworkCIDR": "ServiceNetwork is the CIDR string to specify the service networks", + "externalIPNetworkCIDRs": "ExternalIPNetworkCIDRs controls what values are acceptable for the service external IP field. If empty, no externalIP may be set. It may contain a list of CIDRs which are checked for access. If a CIDR is prefixed with !, IPs in that CIDR will be rejected. Rejections will be applied first, then the IP checked against one of the allowed CIDRs. You should ensure this range does not overlap with your nodes, pods, or service CIDRs for security reasons.", + "ingressIPNetworkCIDR": "IngressIPNetworkCIDR controls the range to assign ingress ips from for services of type LoadBalancer on bare metal. If empty, ingress ips will not be assigned. 
It may contain a single CIDR that will be allocated from. For security reasons, you should ensure that this range does not overlap with the CIDRs reserved for external ips, nodes, pods, or services.", + "vxlanPort": "VXLANPort is the VXLAN port used by the cluster. If it is not set, 4789 is the default value", +} + +func (MasterNetworkConfig) SwaggerDoc() map[string]string { + return map_MasterNetworkConfig +} + +var map_MasterVolumeConfig = map[string]string{ + "": "MasterVolumeConfig contains options for configuring volume plugins in the master node.", + "dynamicProvisioningEnabled": "DynamicProvisioningEnabled is a boolean that toggles dynamic provisioning off when false, defaults to true", +} + +func (MasterVolumeConfig) SwaggerDoc() map[string]string { + return map_MasterVolumeConfig +} + +var map_NamedCertificate = map[string]string{ + "": "NamedCertificate specifies a certificate/key, and the names it should be served for", + "names": "Names is a list of DNS names this certificate should be used to secure. A name can be a normal DNS name, or can contain leading wildcard segments.", +} + +func (NamedCertificate) SwaggerDoc() map[string]string { + return map_NamedCertificate +} + +var map_NodeAuthConfig = map[string]string{ + "": "NodeAuthConfig holds authn/authz configuration options", + "authenticationCacheTTL": "AuthenticationCacheTTL indicates how long an authentication result should be cached. It takes a valid time duration string (e.g. \"5m\"). If empty, you get the default timeout. If zero (e.g. \"0m\"), caching is disabled", + "authenticationCacheSize": "AuthenticationCacheSize indicates how many authentication results should be cached. If 0, the default cache size is used.", + "authorizationCacheTTL": "AuthorizationCacheTTL indicates how long an authorization result should be cached. It takes a valid time duration string (e.g. \"5m\"). If empty, you get the default timeout. If zero (e.g. \"0m\"), caching is disabled", + "authorizationCacheSize": "AuthorizationCacheSize indicates how many authorization results should be cached. If 0, the default cache size is used.", +} + +func (NodeAuthConfig) SwaggerDoc() map[string]string { + return map_NodeAuthConfig +} + +var map_NodeConfig = map[string]string{ + "": "NodeConfig is the fully specified config for starting an OpenShift node", + "nodeName": "NodeName is the value used to identify this particular node in the cluster. If possible, this should be your fully qualified hostname. If you're describing a set of static nodes to the master, this value must match one of the values in the list", + "nodeIP": "A node may have multiple IPs; specify the IP to use for pod traffic routing. If not specified, a network parse/lookup on the nodeName is performed and the first non-loopback address is used", + "servingInfo": "ServingInfo describes how to start serving", + "masterKubeConfig": "MasterKubeConfig is a filename for the .kubeconfig file that describes how to connect this node to the master", + "masterClientConnectionOverrides": "MasterClientConnectionOverrides provides overrides to the client connection used to connect to the master.", + "dnsDomain": "DNSDomain holds the domain suffix that will be used for the DNS search path inside each container. Defaults to 'cluster.local'.", + "dnsIP": "DNSIP is the IP address that pods will use to access cluster DNS. Defaults to the service IP of the Kubernetes master. 
This IP must be listening on port 53 for compatibility with libc resolvers (which cannot be configured to resolve names from any other port). When running more complex local DNS configurations, this is often set to the local address of a DNS proxy like dnsmasq, which then will consult either the local DNS (see dnsBindAddress) or the master DNS.", + "dnsBindAddress": "DNSBindAddress is the ip:port to serve DNS on. If this is not set, the DNS server will not be started. Because most DNS resolvers will only listen on port 53, if you select an alternative port you will need a DNS proxy like dnsmasq to answer queries for containers. A common configuration is dnsmasq configured on a node IP listening on 53 and delegating queries for dnsDomain to this process, while sending other queries to the host environment's nameservers.", + "dnsNameservers": "DNSNameservers is a list of ip:port values of recursive nameservers to forward queries to when running a local DNS server if dnsBindAddress is set. If this value is empty, the DNS server will default to the nameservers listed in /etc/resolv.conf. If you have configured dnsmasq or another DNS proxy on the system, this value should be set to the upstream nameservers dnsmasq resolves with.", + "dnsRecursiveResolvConf": "DNSRecursiveResolvConf is a path to a resolv.conf file that contains settings for an upstream server. Only the nameservers and port fields are used. The file must exist and parse correctly. It adds extra nameservers to DNSNameservers if set.", + "networkPluginName": "Deprecated and maintained for backward compatibility, use NetworkConfig.NetworkPluginName instead", + "networkConfig": "NetworkConfig provides network options for the node", + "volumeDirectory": "VolumeDirectory is the directory that volumes will be stored under", + "imageConfig": "ImageConfig holds options that describe how to build image names for system components", + "allowDisabledDocker": "AllowDisabledDocker, if true, causes the Kubelet to ignore errors from Docker. This means that a node can start on a machine that doesn't have docker started.", + "podManifestConfig": "PodManifestConfig holds the configuration for enabling the Kubelet to create pods based on manifest file(s) placed locally on the node", + "authConfig": "AuthConfig holds authn/authz configuration options", + "dockerConfig": "DockerConfig holds Docker related configuration options.", + "kubeletArguments": "KubeletArguments are key value pairs that will be passed directly to the Kubelet that match the Kubelet's command line arguments. These are not migrated or validated, so if you use them they may become invalid. These values override other settings in NodeConfig which may cause invalid configurations.", + "proxyArguments": "ProxyArguments are key value pairs that will be passed directly to the Proxy that match the Proxy's command line arguments. These are not migrated or validated, so if you use them they may become invalid. 
These values override other settings in NodeConfig which may cause invalid configurations.", + "iptablesSyncPeriod": "IPTablesSyncPeriod is how often iptables rules are refreshed", + "enableUnidling": "EnableUnidling controls whether or not the hybrid unidling proxy will be set up", + "volumeConfig": "VolumeConfig contains options for configuring volumes on the node.", +} + +func (NodeConfig) SwaggerDoc() map[string]string { + return map_NodeConfig +} + +var map_NodeNetworkConfig = map[string]string{ + "": "NodeNetworkConfig provides network options for the node", + "networkPluginName": "NetworkPluginName is a string specifying the networking plugin", + "mtu": "Maximum transmission unit for the network packets", +} + +func (NodeNetworkConfig) SwaggerDoc() map[string]string { + return map_NodeNetworkConfig +} + +var map_NodeVolumeConfig = map[string]string{ + "": "NodeVolumeConfig contains options for configuring volumes on the node.", + "localQuota": "LocalQuota contains options for controlling local volume quota on the node.", +} + +func (NodeVolumeConfig) SwaggerDoc() map[string]string { + return map_NodeVolumeConfig +} + +var map_OAuthConfig = map[string]string{ + "": "OAuthConfig holds the necessary configuration options for OAuth authentication", + "masterCA": "MasterCA is the CA for verifying the TLS connection back to the MasterURL.", + "masterURL": "MasterURL is used for making server-to-server calls to exchange authorization codes for access tokens", + "masterPublicURL": "MasterPublicURL is used for building valid client redirect URLs for internal and external access", + "assetPublicURL": "AssetPublicURL is used for building valid client redirect URLs for external access", + "alwaysShowProviderSelection": "AlwaysShowProviderSelection will force the provider selection page to render even when there is only a single provider.", + "identityProviders": "IdentityProviders is an ordered list of ways for a user to identify themselves", + "grantConfig": "GrantConfig describes how to handle grants", + "sessionConfig": "SessionConfig hold information about configuring sessions.", + "tokenConfig": "TokenConfig contains options for authorization and access tokens", + "templates": "Templates allow you to customize pages like the login page.", +} + +func (OAuthConfig) SwaggerDoc() map[string]string { + return map_OAuthConfig +} + +var map_OAuthTemplates = map[string]string{ + "": "OAuthTemplates allow for customization of pages like the login page", + "login": "Login is a path to a file containing a go template used to render the login page. If unspecified, the default login page is used.", + "providerSelection": "ProviderSelection is a path to a file containing a go template used to render the provider selection page. If unspecified, the default provider selection page is used.", + "error": "Error is a path to a file containing a go template used to render error pages during the authentication or grant flow. If unspecified, the default error page is used.", +} + +func (OAuthTemplates) SwaggerDoc() map[string]string { + return map_OAuthTemplates +} + +var map_OpenIDClaims = map[string]string{ + "": "OpenIDClaims contains a list of OpenID claims to use when authenticating with an OpenID identity provider", + "id": "ID is the list of claims whose values should be used as the user ID. Required. OpenID standard identity claim is \"sub\"", + "preferredUsername": "PreferredUsername is the list of claims whose values should be used as the preferred username. 
If unspecified, the preferred username is determined from the value of the id claim", + "name": "Name is the list of claims whose values should be used as the display name. Optional. If unspecified, no display name is set for the identity", + "email": "Email is the list of claims whose values should be used as the email address. Optional. If unspecified, no email is set for the identity", +} + +func (OpenIDClaims) SwaggerDoc() map[string]string { + return map_OpenIDClaims +} + +var map_OpenIDIdentityProvider = map[string]string{ + "": "OpenIDIdentityProvider provides identities for users authenticating using OpenID credentials", + "ca": "CA is the optional trusted certificate authority bundle to use when making requests to the server. If empty, the default system roots are used", + "clientID": "ClientID is the oauth client ID", + "clientSecret": "ClientSecret is the oauth client secret", + "extraScopes": "ExtraScopes are any scopes to request in addition to the standard \"openid\" scope.", + "extraAuthorizeParameters": "ExtraAuthorizeParameters are any custom parameters to add to the authorize request.", + "urls": "URLs to use to authenticate", + "claims": "Claims mappings", +} + +func (OpenIDIdentityProvider) SwaggerDoc() map[string]string { + return map_OpenIDIdentityProvider +} + +var map_OpenIDURLs = map[string]string{ + "": "OpenIDURLs are URLs to use when authenticating with an OpenID identity provider", + "authorize": "Authorize is the oauth authorization URL", + "token": "Token is the oauth token granting URL", + "userInfo": "UserInfo is the optional userinfo URL. If present, a granted access_token is used to request claims If empty, a granted id_token is parsed for claims", +} + +func (OpenIDURLs) SwaggerDoc() map[string]string { + return map_OpenIDURLs +} + +var map_PodManifestConfig = map[string]string{ + "": "PodManifestConfig holds the necessary configuration options for using pod manifests", + "path": "Path specifies the path for the pod manifest file or directory. If it's a directory, it's expected to contain one or more manifest files. This is used by the Kubelet to create pods on the node", + "fileCheckIntervalSeconds": "FileCheckIntervalSeconds is the interval in seconds for checking the manifest file(s) for new data. The interval needs to be a positive value", +} + +func (PodManifestConfig) SwaggerDoc() map[string]string { + return map_PodManifestConfig +} + +var map_PolicyConfig = map[string]string{ + "": "\n holds the necessary configuration options for", + "userAgentMatchingConfig": "UserAgentMatchingConfig controls how API calls from *voluntarily* identifying clients will be handled. THIS DOES NOT DEFEND AGAINST MALICIOUS CLIENTS!", +} + +func (PolicyConfig) SwaggerDoc() map[string]string { + return map_PolicyConfig +} + +var map_ProjectConfig = map[string]string{ + "": "\n holds the necessary configuration options for", + "defaultNodeSelector": "DefaultNodeSelector holds default project node label selector", + "projectRequestMessage": "ProjectRequestMessage is the string presented to a user if they are unable to request a project via the projectrequest api endpoint", + "projectRequestTemplate": "ProjectRequestTemplate is the template to use for creating projects in response to projectrequest. It is in the format namespace/template and it is optional. If it is not specified, a default template is used.", + "securityAllocator": "SecurityAllocator controls the automatic allocation of UIDs and MCS labels to a project. 
If nil, allocation is disabled.", +} + +func (ProjectConfig) SwaggerDoc() map[string]string { + return map_ProjectConfig +} + +var map_RFC2307Config = map[string]string{ + "": "RFC2307Config holds the necessary configuration options to define how an LDAP group sync interacts with an LDAP server using the RFC2307 schema", + "groupsQuery": "AllGroupsQuery holds the template for an LDAP query that returns group entries.", + "groupUIDAttribute": "GroupUIDAttributes defines which attribute on an LDAP group entry will be interpreted as its unique identifier. (ldapGroupUID)", + "groupNameAttributes": "GroupNameAttributes defines which attributes on an LDAP group entry will be interpreted as its name to use for an OpenShift group", + "groupMembershipAttributes": "GroupMembershipAttributes defines which attributes on an LDAP group entry will be interpreted as its members. The values contained in those attributes must be queryable by your UserUIDAttribute", + "usersQuery": "AllUsersQuery holds the template for an LDAP query that returns user entries.", + "userUIDAttribute": "UserUIDAttribute defines which attribute on an LDAP user entry will be interpreted as its unique identifier. It must correspond to values that will be found from the GroupMembershipAttributes", + "userNameAttributes": "UserNameAttributes defines which attributes on an LDAP user entry will be used, in order, as its OpenShift user name. The first attribute with a non-empty value is used. This should match your PreferredUsername setting for your LDAPPasswordIdentityProvider", + "tolerateMemberNotFoundErrors": "TolerateMemberNotFoundErrors determines the behavior of the LDAP sync job when missing user entries are encountered. If 'true', an LDAP query for users that doesn't find any will be tolerated and only an error will be logged. If 'false', the LDAP sync job will fail if a query for users doesn't find any. The default value is 'false'. Misconfigured LDAP sync jobs with this flag set to 'true' can cause group membership to be removed, so it is recommended to use this flag with caution.", + "tolerateMemberOutOfScopeErrors": "TolerateMemberOutOfScopeErrors determines the behavior of the LDAP sync job when out-of-scope user entries are encountered. If 'true', an LDAP query for a user that falls outside of the base DN given for the all user query will be tolerated and only an error will be logged. If 'false', the LDAP sync job will fail if a user query would search outside of the base DN specified by the all user query. Misconfigured LDAP sync jobs with this flag set to 'true' can result in groups missing users, so it is recommended to use this flag with caution.", +} + +func (RFC2307Config) SwaggerDoc() map[string]string { + return map_RFC2307Config +} + +var map_RegistryLocation = map[string]string{ + "": "RegistryLocation contains a location of the registry specified by the registry domain name. 
The domain name might include wildcards, like '*' or '??'.", + "domainName": "DomainName specifies a domain name for the registry. If the registry uses a non-standard port (other than 80 or 443), the port should be included in the domain name as well.", + "insecure": "Insecure indicates whether the registry is secure (https) or insecure (http). By default (if not specified) the registry is assumed to be secure.", +} + +func (RegistryLocation) SwaggerDoc() map[string]string { + return map_RegistryLocation +} + +var map_RemoteConnectionInfo = map[string]string{ + "": "RemoteConnectionInfo holds information necessary for establishing a remote connection", + "url": "URL is the remote URL to connect to", + "ca": "CA is the CA for verifying TLS connections", +} + +func (RemoteConnectionInfo) SwaggerDoc() map[string]string { + return map_RemoteConnectionInfo +} + +var map_RequestHeaderAuthenticationOptions = map[string]string{ + "": "RequestHeaderAuthenticationOptions provides options for setting up a front proxy against the entire API instead of against the /oauth endpoint.", + "clientCA": "ClientCA is a file with the trusted signer certs. It is required.", + "clientCommonNames": "ClientCommonNames is a required list of common names to require a match from.", + "usernameHeaders": "UsernameHeaders is the list of headers to check for user information. First hit wins.", + "groupHeaders": "GroupNameHeader is the set of headers to check for group information. All are unioned.", + "extraHeaderPrefixes": "ExtraHeaderPrefixes is the set of request header prefixes to inspect for user extra. X-Remote-Extra- is suggested.", +} + +func (RequestHeaderAuthenticationOptions) SwaggerDoc() map[string]string { + return map_RequestHeaderAuthenticationOptions +} + +var map_RequestHeaderIdentityProvider = map[string]string{ + "": "RequestHeaderIdentityProvider provides identities for users authenticating using request header credentials", + "loginURL": "LoginURL is a URL to redirect unauthenticated /authorize requests to Unauthenticated requests from OAuth clients which expect interactive logins will be redirected here ${url} is replaced with the current URL, escaped to be safe in a query parameter\n https://www.example.com/sso-login?then=${url}\n${query} is replaced with the current query string\n https://www.example.com/auth-proxy/oauth/authorize?${query}", + "challengeURL": "ChallengeURL is a URL to redirect unauthenticated /authorize requests to Unauthenticated requests from OAuth clients which expect WWW-Authenticate challenges will be redirected here ${url} is replaced with the current URL, escaped to be safe in a query parameter\n https://www.example.com/sso-login?then=${url}\n${query} is replaced with the current query string\n https://www.example.com/auth-proxy/oauth/authorize?${query}", + "clientCA": "ClientCA is a file with the trusted signer certs. If empty, no request verification is done, and any direct request to the OAuth server can impersonate any identity from this provider, merely by setting a request header.", + "clientCommonNames": "ClientCommonNames is an optional list of common names to require a match from. 
If empty, any client certificate validated against the clientCA bundle is considered authoritative.", + "headers": "Headers is the set of headers to check for identity information", + "preferredUsernameHeaders": "PreferredUsernameHeaders is the set of headers to check for the preferred username", + "nameHeaders": "NameHeaders is the set of headers to check for the display name", + "emailHeaders": "EmailHeaders is the set of headers to check for the email address", +} + +func (RequestHeaderIdentityProvider) SwaggerDoc() map[string]string { + return map_RequestHeaderIdentityProvider +} + +var map_RoutingConfig = map[string]string{ + "": "RoutingConfig holds the necessary configuration options for routing to subdomains", + "subdomain": "Subdomain is the suffix appended to $service.$namespace. to form the default route hostname. DEPRECATED: This field is being replaced by routers setting their own defaults. This is the \"default\" route.", +} + +func (RoutingConfig) SwaggerDoc() map[string]string { + return map_RoutingConfig +} + +var map_SecurityAllocator = map[string]string{ + "": "SecurityAllocator controls the automatic allocation of UIDs and MCS labels to a project. If nil, allocation is disabled.", + "uidAllocatorRange": "UIDAllocatorRange defines the total set of Unix user IDs (UIDs) that will be allocated to projects automatically, and the size of the block each namespace gets. For example, 1000-1999/10 will allocate ten UIDs per namespace, and will be able to allocate up to 100 blocks before running out of space. The default is to allocate from 1 billion to 2 billion in 10k blocks (which is the expected size of the ranges container images will use once user namespaces are started).", + "mcsAllocatorRange": "MCSAllocatorRange defines the range of MCS categories that will be assigned to namespaces. The format is \"<prefix>/<numberOfLabels>[,<maxCategory>]\". The default is \"s0/2\" and will allocate from c0 -> c1023, which means a total of about 524k labels are available (1024 choose 2 = 523,776). If this value is changed after startup, new projects may receive labels that are already allocated to other projects. Prefix may be any valid SELinux set of terms (including user, role, and type), although leaving them as the default will allow the server to set them automatically.\n\nExamples: * s0:/2 - Allocate labels from s0:c0,c0 to s0:c511,c511 * s0:/2,512 - Allocate labels from s0:c0,c0,c0 to s0:c511,c511,511", + "mcsLabelsPerProject": "MCSLabelsPerProject defines the number of labels that should be reserved per project. The default is 5 to match the default UID and MCS ranges (100k namespaces, 524k/5 labels).", +} + +func (SecurityAllocator) SwaggerDoc() map[string]string { + return map_SecurityAllocator +} + +var map_ServiceAccountConfig = map[string]string{ + "": "ServiceAccountConfig holds the necessary configuration options for a service account", + "managedNames": "ManagedNames is a list of service account names that will be auto-created in every namespace. If no names are specified, the ServiceAccountsController will not be started.", + "limitSecretReferences": "LimitSecretReferences controls whether or not to allow a service account to reference any secret in a namespace without explicitly referencing them", + "privateKeyFile": "PrivateKeyFile is a file containing a PEM-encoded private RSA key, used to sign service account tokens. 
If no private key is specified, the service account TokensController will not be started.", + "publicKeyFiles": "PublicKeyFiles is a list of files, each containing a PEM-encoded public RSA key. (If any file contains a private key, the public portion of the key is used) The list of public keys is used to verify presented service account tokens. Each key is tried in order until the list is exhausted or verification succeeds. If no keys are specified, no service account authentication will be available.", + "masterCA": "MasterCA is the CA for verifying the TLS connection back to the master. The service account controller will automatically inject the contents of this file into pods so they can verify connections to the master.", +} + +func (ServiceAccountConfig) SwaggerDoc() map[string]string { + return map_ServiceAccountConfig +} + +var map_ServiceServingCert = map[string]string{ + "": "ServiceServingCert holds configuration for service serving cert signer which creates cert/key pairs for pods fulfilling a service to serve with.", + "signer": "Signer holds the signing information used to automatically sign serving certificates. If this value is nil, then certs are not signed automatically.", +} + +func (ServiceServingCert) SwaggerDoc() map[string]string { + return map_ServiceServingCert +} + +var map_ServingInfo = map[string]string{ + "": "ServingInfo holds information about serving web pages", + "bindAddress": "BindAddress is the ip:port to serve on", + "bindNetwork": "BindNetwork is the type of network to bind to - defaults to \"tcp4\", accepts \"tcp\", \"tcp4\", and \"tcp6\"", + "clientCA": "ClientCA is the certificate bundle for all the signers that you'll recognize for incoming client certificates", + "namedCertificates": "NamedCertificates is a list of certificates to use to secure requests to specific hostnames", + "minTLSVersion": "MinTLSVersion is the minimum TLS version supported. Values must match version names from https://golang.org/pkg/crypto/tls/#pkg-constants", + "cipherSuites": "CipherSuites contains an overridden list of ciphers for the server to support. Values must match cipher suite IDs from https://golang.org/pkg/crypto/tls/#pkg-constants", +} + +func (ServingInfo) SwaggerDoc() map[string]string { + return map_ServingInfo +} + +var map_SessionConfig = map[string]string{ + "": "SessionConfig specifies options for cookie-based sessions. Used by AuthRequestHandlerSession", + "sessionSecretsFile": "SessionSecretsFile is a reference to a file containing a serialized SessionSecrets object. If no file is specified, random signing and encryption keys are generated at each server start", + "sessionMaxAgeSeconds": "SessionMaxAgeSeconds specifies how long created sessions last. Used by AuthRequestHandlerSession", + "sessionName": "SessionName is the cookie name used to store the session", +} + +func (SessionConfig) SwaggerDoc() map[string]string { + return map_SessionConfig +} + +var map_SessionSecret = map[string]string{ + "": "SessionSecret is a secret used to authenticate/decrypt cookie-based sessions", + "authentication": "Authentication is used to authenticate sessions using HMAC. Recommended to use a secret with 32 or 64 bytes.", + "encryption": "Encryption is used to encrypt sessions. 
Must be 16, 24, or 32 characters long, to select AES-128, AES-192, or AES-256.", +} + +func (SessionSecret) SwaggerDoc() map[string]string { + return map_SessionSecret +} + +var map_SessionSecrets = map[string]string{ + "": "SessionSecrets lists the secrets to use to sign/encrypt and authenticate/decrypt created sessions.", + "secrets": "Secrets is a list of secrets. New sessions are signed and encrypted using the first secret. Existing sessions are decrypted/authenticated by each secret until one succeeds. This allows rotating secrets.", +} + +func (SessionSecrets) SwaggerDoc() map[string]string { + return map_SessionSecrets +} + +var map_SourceStrategyDefaultsConfig = map[string]string{ + "": "SourceStrategyDefaultsConfig contains values that apply to builds using the source strategy.", + "incremental": "incremental indicates if s2i build strategies should perform an incremental build or not", +} + +func (SourceStrategyDefaultsConfig) SwaggerDoc() map[string]string { + return map_SourceStrategyDefaultsConfig +} + +var map_StringSource = map[string]string{ + "": "StringSource allows specifying a string inline, or externally via env var or file. When it contains only a string value, it marshals to a simple JSON string.", +} + +func (StringSource) SwaggerDoc() map[string]string { + return map_StringSource +} + +var map_StringSourceSpec = map[string]string{ + "": "StringSourceSpec specifies a string value, or external location", + "value": "Value specifies the cleartext value, or an encrypted value if keyFile is specified.", + "env": "Env specifies an envvar containing the cleartext value, or an encrypted value if the keyFile is specified.", + "file": "File references a file containing the cleartext value, or an encrypted value if a keyFile is specified.", + "keyFile": "KeyFile references a file containing the key to use to decrypt the value.", +} + +func (StringSourceSpec) SwaggerDoc() map[string]string { + return map_StringSourceSpec +} + +var map_TokenConfig = map[string]string{ + "": "TokenConfig holds the necessary configuration options for authorization and access tokens", + "authorizeTokenMaxAgeSeconds": "AuthorizeTokenMaxAgeSeconds defines the maximum age of authorize tokens", + "accessTokenMaxAgeSeconds": "AccessTokenMaxAgeSeconds defines the maximum age of access tokens", + "accessTokenInactivityTimeoutSeconds": "AccessTokenInactivityTimeoutSeconds defines the default token inactivity timeout for tokens granted by any client. Setting it to nil means the feature is completely disabled (default). The default setting can be overridden on a per-OAuthClient basis. The value represents the maximum amount of time that can occur between consecutive uses of the token. Tokens become invalid if they are not used within this temporal window. The user will need to acquire a new token to regain access once a token times out. Valid values are: - 0: Tokens never time out - X: Tokens time out if there is no activity for X seconds. The current minimum allowed value for X is 300 (5 minutes)", +} + +func (TokenConfig) SwaggerDoc() map[string]string { + return map_TokenConfig +}
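The accessTokenInactivityTimeoutSeconds entry above spells out the timeout semantics: 0 means tokens never time out, otherwise a token becomes invalid after X idle seconds. A hedged sketch of that check; the names lastUsed and timeoutSeconds are illustrative, not fields of the vendored API:

package main

import (
	"fmt"
	"time"
)

// expiredByInactivity applies the rule described for
// AccessTokenInactivityTimeoutSeconds: 0 disables the timeout entirely;
// any other value invalidates a token once more than that many seconds
// pass between consecutive uses.
func expiredByInactivity(lastUsed time.Time, timeoutSeconds int32, now time.Time) bool {
	if timeoutSeconds == 0 {
		return false // 0: tokens never time out
	}
	return now.Sub(lastUsed) > time.Duration(timeoutSeconds)*time.Second
}

func main() {
	lastUsed := time.Now().Add(-10 * time.Minute)
	fmt.Println(expiredByInactivity(lastUsed, 0, time.Now()))   // false: feature disabled
	fmt.Println(expiredByInactivity(lastUsed, 300, time.Now())) // true: idle longer than 5m
}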
+ +var map_UserAgentDenyRule = map[string]string{ + "": "UserAgentDenyRule adds a rejection message that can be used to help a user figure out how to get an approved client", + "rejectionMessage": "RejectionMessage is the message shown when rejecting a client. If it is not set, the default message is used.", +} + +func (UserAgentDenyRule) SwaggerDoc() map[string]string { + return map_UserAgentDenyRule +} + +var map_UserAgentMatchRule = map[string]string{ + "": "UserAgentMatchRule describes how to match a given request based on User-Agent and HTTPVerb", + "regex": "UserAgentRegex is a regex that is checked against the User-Agent. Known variants of oc clients: 1. oc accessing kube resources: oc/v1.2.0 (linux/amd64) kubernetes/bc4550d 2. oc accessing openshift resources: oc/v1.1.3 (linux/amd64) openshift/b348c2f 3. openshift kubectl accessing kube resources: openshift/v1.2.0 (linux/amd64) kubernetes/bc4550d 4. openshift kubectl accessing openshift resources: openshift/v1.1.3 (linux/amd64) openshift/b348c2f 5. oadm accessing kube resources: oadm/v1.2.0 (linux/amd64) kubernetes/bc4550d 6. oadm accessing openshift resources: oadm/v1.1.3 (linux/amd64) openshift/b348c2f 7. openshift cli accessing kube resources: openshift/v1.2.0 (linux/amd64) kubernetes/bc4550d 8. openshift cli accessing openshift resources: openshift/v1.1.3 (linux/amd64) openshift/b348c2f", + "httpVerbs": "HTTPVerbs specifies which HTTP verbs should be matched. An empty list means \"match all verbs\".", +} + +func (UserAgentMatchRule) SwaggerDoc() map[string]string { + return map_UserAgentMatchRule +} + +var map_UserAgentMatchingConfig = map[string]string{ + "": "UserAgentMatchingConfig controls how API calls from *voluntarily* identifying clients will be handled. THIS DOES NOT DEFEND AGAINST MALICIOUS CLIENTS!", + "requiredClients": "If this list is non-empty, then a User-Agent must match one of the UserAgentRegexes to be allowed", + "deniedClients": "If this list is non-empty, then a User-Agent must not match any of the UserAgentRegexes", + "defaultRejectionMessage": "DefaultRejectionMessage is the message shown when rejecting a client. If it is not set, a generic message is given.", +} + +func (UserAgentMatchingConfig) SwaggerDoc() map[string]string { + return map_UserAgentMatchingConfig +} + +var map_WebhookTokenAuthenticator = map[string]string{ + "": "WebhookTokenAuthenticator holds the necessary configuration options for external token authenticators", + "configFile": "ConfigFile is a path to a Kubeconfig file with the webhook configuration", + "cacheTTL": "CacheTTL indicates how long an authentication result should be cached. It takes a valid time duration string (e.g. \"5m\"). If empty, you get a default timeout of 2 minutes. If zero (e.g. 
\"0m\"), caching is disabled", +} + +func (WebhookTokenAuthenticator) SwaggerDoc() map[string]string { + return map_WebhookTokenAuthenticator +} + +// AUTO-GENERATED FUNCTIONS END HERE diff --git a/vendor/github.com/openshift/api/network/OWNERS b/vendor/github.com/openshift/api/network/OWNERS new file mode 100644 index 0000000000000000000000000000000000000000..279009f7ae368da0b4cf589636ea063368dde021 --- /dev/null +++ b/vendor/github.com/openshift/api/network/OWNERS @@ -0,0 +1,4 @@ +reviewers: + - danwinship + - dcbw + - knobunc diff --git a/vendor/github.com/openshift/api/network/install.go b/vendor/github.com/openshift/api/network/install.go new file mode 100644 index 0000000000000000000000000000000000000000..85bc706236e672d5cc6dd7601942e45878a11ecd --- /dev/null +++ b/vendor/github.com/openshift/api/network/install.go @@ -0,0 +1,26 @@ +package network + +import ( + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" + + networkv1 "github.com/openshift/api/network/v1" +) + +const ( + GroupName = "network.openshift.io" +) + +var ( + schemeBuilder = runtime.NewSchemeBuilder(networkv1.Install) + // Install is a function which adds every version of this group to a scheme + Install = schemeBuilder.AddToScheme +) + +func Resource(resource string) schema.GroupResource { + return schema.GroupResource{Group: GroupName, Resource: resource} +} + +func Kind(kind string) schema.GroupKind { + return schema.GroupKind{Group: GroupName, Kind: kind} +} diff --git a/vendor/github.com/openshift/api/network/v1/001-clusternetwork-crd.yaml b/vendor/github.com/openshift/api/network/v1/001-clusternetwork-crd.yaml new file mode 100644 index 0000000000000000000000000000000000000000..b064daade51e7aa2ab6835a0d1699eab248077ab --- /dev/null +++ b/vendor/github.com/openshift/api/network/v1/001-clusternetwork-crd.yaml @@ -0,0 +1,134 @@ +apiVersion: apiextensions.k8s.io/v1beta1 +kind: CustomResourceDefinition +metadata: + creationTimestamp: null + name: clusternetworks.network.openshift.io +spec: + group: network.openshift.io + names: + kind: ClusterNetwork + listKind: ClusterNetworkList + plural: clusternetworks + singular: clusternetwork + scope: Cluster + validation: + # As compared to ValidateClusterNetwork, this does not validate that: + # - the hostSubnetLengths are valid for their CIDRs + # - the cluster/service networks do not overlap + # - .network and .hostsubnetlength are set if name == 'default' + # - .network and .hostsubnetlength are either unset, or equal to + # .clusterNetworks[0].CIDR and .clusterNetworks[0].hostSubnetLength + openAPIV3Schema: + description: ClusterNetwork describes the cluster network. There is normally + only one object of this type, named "default", which is created by the SDN + network plugin based on the master configuration when the cluster is brought + up for the first time. + type: object + required: + - clusterNetworks + - serviceNetwork + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation + of an object. Servers should convert recognized schemas to the latest + internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + clusterNetworks: + description: ClusterNetworks is a list of ClusterNetwork objects that defines + the global overlay network's L3 space by specifying a set of CIDR and + netmasks that the SDN can allocate addresses from. 
diff --git a/vendor/github.com/openshift/api/network/v1/001-clusternetwork-crd.yaml b/vendor/github.com/openshift/api/network/v1/001-clusternetwork-crd.yaml new file mode 100644 index 0000000000000000000000000000000000000000..b064daade51e7aa2ab6835a0d1699eab248077ab --- /dev/null +++ b/vendor/github.com/openshift/api/network/v1/001-clusternetwork-crd.yaml @@ -0,0 +1,134 @@ +apiVersion: apiextensions.k8s.io/v1beta1 +kind: CustomResourceDefinition +metadata: + creationTimestamp: null + name: clusternetworks.network.openshift.io +spec: + group: network.openshift.io + names: + kind: ClusterNetwork + listKind: ClusterNetworkList + plural: clusternetworks + singular: clusternetwork + scope: Cluster + validation: + # As compared to ValidateClusterNetwork, this does not validate that: + # - the hostSubnetLengths are valid for their CIDRs + # - the cluster/service networks do not overlap + # - .network and .hostsubnetlength are set if name == 'default' + # - .network and .hostsubnetlength are either unset, or equal to + # .clusterNetworks[0].CIDR and .clusterNetworks[0].hostSubnetLength + openAPIV3Schema: + description: ClusterNetwork describes the cluster network. There is normally + only one object of this type, named "default", which is created by the SDN + network plugin based on the master configuration when the cluster is brought + up for the first time. + type: object + required: + - clusterNetworks + - serviceNetwork + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation + of an object. Servers should convert recognized schemas to the latest + internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + clusterNetworks: + description: ClusterNetworks is a list of ClusterNetwork objects that defines + the global overlay network's L3 space by specifying a set of CIDRs and + netmasks that the SDN can allocate addresses from. + type: array + items: + description: ClusterNetworkEntry defines an individual cluster network. + The CIDRs cannot overlap with other cluster network CIDRs, CIDRs reserved + for external ips, CIDRs reserved for service networks, and CIDRs reserved + for ingress ips. + type: object + required: + - CIDR + - hostSubnetLength + properties: + CIDR: + description: CIDR defines the total range of a cluster network's address + space. + type: string + pattern: ^(([0-9]|[0-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5])\.){3}([0-9]|[0-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5])/([0-9]|[12][0-9]|3[0-2])$ + hostSubnetLength: + description: HostSubnetLength is the number of bits of the accompanying + CIDR address to allocate to each node. e.g., 8 would mean that each + node would have a /24 slice of the overlay network for its pods. + type: integer + format: int32 + maximum: 30 + minimum: 2 + hostsubnetlength: + description: HostSubnetLength is the number of bits of network to allocate + to each node. e.g., 8 would mean that each node would have a /24 slice of + the overlay network for its pods + type: integer + format: int32 + maximum: 30 + minimum: 2 + kind: + description: 'Kind is a string value representing the REST resource this + object represents. Servers may infer this from the endpoint the client + submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + metadata: + type: object + mtu: + description: MTU is the MTU for the overlay network. This should be 50 less + than the MTU of the network connecting the nodes. It is normally autodetected + by the cluster network operator. + type: integer + format: int32 + maximum: 65536 + minimum: 576 + network: + description: Network is a CIDR string specifying the global overlay network's + L3 space + type: string + pattern: ^(([0-9]|[0-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5])\.){3}([0-9]|[0-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5])/([0-9]|[12][0-9]|3[0-2])$ + pluginName: + description: PluginName is the name of the network plugin being used + type: string + serviceNetwork: + description: ServiceNetwork is the CIDR range that Service IP addresses + are allocated from + type: string + pattern: ^(([0-9]|[0-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5])\.){3}([0-9]|[0-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5])/([0-9]|[12][0-9]|3[0-2])$ + vxlanPort: + description: VXLANPort sets the VXLAN destination port used by the cluster. + It is set by the master configuration file on startup and cannot be edited + manually. Valid values for VXLANPort are integers 1-65535 inclusive and + if unset defaults to 4789. Changing VXLANPort allows users to resolve + issues between openshift SDN and other software trying to use the same + VXLAN destination port. 
+ type: integer + format: int32 + maximum: 65535 + minimum: 1 + additionalPrinterColumns: + - name: Cluster Network + type: string + description: The primary cluster network CIDR + JSONPath: .network + - name: Service Network + type: string + description: The service network CIDR + JSONPath: .serviceNetwork + - name: Plugin Name + type: string + description: The OpenShift SDN network plug-in in use + JSONPath: .pluginName + version: v1 + versions: + - name: v1 + served: true + storage: true +status: + acceptedNames: + kind: "" + plural: "" + conditions: [] + storedVersions: [] diff --git a/vendor/github.com/openshift/api/network/v1/002-hostsubnet-crd.yaml b/vendor/github.com/openshift/api/network/v1/002-hostsubnet-crd.yaml new file mode 100644 index 0000000000000000000000000000000000000000..9b8af49147a51cbc3ce7eba01919c0c8f5e4cc5c --- /dev/null +++ b/vendor/github.com/openshift/api/network/v1/002-hostsubnet-crd.yaml @@ -0,0 +1,111 @@ +apiVersion: apiextensions.k8s.io/v1beta1 +kind: CustomResourceDefinition +metadata: + creationTimestamp: null + name: hostsubnets.network.openshift.io +spec: + group: network.openshift.io + names: + kind: HostSubnet + listKind: HostSubnetList + plural: hostsubnets + singular: hostsubnet + scope: Cluster + validation: + # As compared to ValidateHostSubnet, this does not validate that: + # - .host == .name + # - either .subnet is set or the assign-subnet annotation is present + # As compared to ValidateHostSubnetUpdate, this does not validate that: + # - .subnet is not changed on an existing object + openAPIV3Schema: + description: HostSubnet describes the container subnet network on a node. The + HostSubnet object must have the same name as the Node object it corresponds + to. + type: object + required: + - host + - hostIP + - subnet + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation + of an object. Servers should convert recognized schemas to the latest + internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + egressCIDRs: + description: EgressCIDRs is the list of CIDR ranges available for automatically + assigning egress IPs to this node from. If this field is set then EgressIPs + should be treated as read-only. + type: array + items: + description: HostSubnetEgressCIDR represents one egress CIDR from which + to assign IP addresses for this node represented by the HostSubnet + type: string + pattern: ^(([0-9]|[0-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5])\.){3}([0-9]|[0-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5])/([0-9]|[12][0-9]|3[0-2])$ + egressIPs: + description: EgressIPs is the list of automatic egress IP addresses currently + hosted by this node. If EgressCIDRs is empty, this can be set by hand; + if EgressCIDRs is set then the master will overwrite the value here with + its own allocation of egress IPs. + type: array + items: + description: HostSubnetEgressIP represents one egress IP address currently + hosted on the node represented by HostSubnet + type: string + pattern: ^(([0-9]|[0-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5])\.){3}([0-9]|[0-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5])$ + host: + description: Host is the name of the node. (This is the same as the object's + name, but both fields must be set.) 
+ type: string + pattern: ^[a-z0-9.-]+$ + hostIP: + description: HostIP is the IP address to be used as a VTEP by other nodes + in the overlay network + type: string + pattern: ^(([0-9]|[0-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5])\.){3}([0-9]|[0-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5])$ + kind: + description: 'Kind is a string value representing the REST resource this + object represents. Servers may infer this from the endpoint the client + submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + metadata: + type: object + subnet: + description: Subnet is the CIDR range of the overlay network assigned to + the node for its pods + type: string + pattern: ^(([0-9]|[0-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5])\.){3}([0-9]|[0-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5])/([0-9]|[12][0-9]|3[0-2])$ + additionalPrinterColumns: + - name: Host + type: string + description: The name of the node + JSONPath: .host + - name: Host IP + type: string + description: The IP address to be used as a VTEP by other nodes in the overlay + network + JSONPath: .hostIP + - name: Subnet + type: string + description: The CIDR range of the overlay network assigned to the node for its + pods + JSONPath: .subnet + - name: Egress CIDRs + type: string + description: The network egress CIDRs + JSONPath: .egressCIDRs + - name: Egress IPs + type: string + description: The network egress IP addresses + JSONPath: .egressIPs + version: v1 + versions: + - name: v1 + served: true + storage: true +status: + acceptedNames: + kind: "" + plural: "" + conditions: [] + storedVersions: [] diff --git a/vendor/github.com/openshift/api/network/v1/003-netnamespace-crd.yaml b/vendor/github.com/openshift/api/network/v1/003-netnamespace-crd.yaml new file mode 100644 index 0000000000000000000000000000000000000000..80fa3b05f528dcb48d76cdc5c3755dd95348958c --- /dev/null +++ b/vendor/github.com/openshift/api/network/v1/003-netnamespace-crd.yaml @@ -0,0 +1,83 @@ +apiVersion: apiextensions.k8s.io/v1beta1 +kind: CustomResourceDefinition +metadata: + creationTimestamp: null + name: netnamespaces.network.openshift.io +spec: + group: network.openshift.io + names: + kind: NetNamespace + listKind: NetNamespaceList + plural: netnamespaces + singular: netnamespace + scope: Cluster + validation: + # As compared to ValidateNetNamespace, this does not validate that: + # - .netname == .name + # - .netid is not 1-9 + openAPIV3Schema: + description: NetNamespace describes a single isolated network. When using the + redhat/openshift-ovs-multitenant plugin, every Namespace will have a corresponding + NetNamespace object with the same name. (When using redhat/openshift-ovs-subnet, + NetNamespaces are not used.) + type: object + required: + - netid + - netname + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation + of an object. Servers should convert recognized schemas to the latest + internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + egressIPs: + description: EgressIPs is a list of reserved IPs that will be used as the + source for external traffic coming from pods in this namespace. (If empty, + external traffic will be masqueraded to Node IPs.) 
+ type: array + items: + description: NetNamespaceEgressIP is a single egress IP out of a list + of reserved IPs used as source of external traffic coming from pods + in this namespace + type: string + pattern: ^(([0-9]|[0-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5])\.){3}([0-9]|[0-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5])$ + kind: + description: 'Kind is a string value representing the REST resource this + object represents. Servers may infer this from the endpoint the client + submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + metadata: + type: object + netid: + description: NetID is the network identifier of the network namespace assigned + to each overlay network packet. This can be manipulated with the "oc adm + pod-network" commands. + type: integer + format: int32 + maximum: 16777215 + minimum: 0 + netname: + description: NetName is the name of the network namespace. (This is the + same as the object's name, but both fields must be set.) + type: string + pattern: ^[a-z0-9.-]+$ + additionalPrinterColumns: + - name: NetID + type: integer + description: The network identifier of the network namespace + JSONPath: .netid + - name: Egress IPs + type: string + description: The network egress IP addresses + JSONPath: .egressIPs + version: v1 + versions: + - name: v1 + served: true + storage: true +status: + acceptedNames: + kind: "" + plural: "" + conditions: [] + storedVersions: [] diff --git a/vendor/github.com/openshift/api/network/v1/004-egressnetworkpolicy-crd.yaml b/vendor/github.com/openshift/api/network/v1/004-egressnetworkpolicy-crd.yaml new file mode 100644 index 0000000000000000000000000000000000000000..16e84c1040e820ddbc7ad04f50a9716da8a904b1 --- /dev/null +++ b/vendor/github.com/openshift/api/network/v1/004-egressnetworkpolicy-crd.yaml @@ -0,0 +1,88 @@ +apiVersion: apiextensions.k8s.io/v1beta1 +kind: CustomResourceDefinition +metadata: + creationTimestamp: null + name: egressnetworkpolicies.network.openshift.io +spec: + group: network.openshift.io + names: + kind: EgressNetworkPolicy + listKind: EgressNetworkPolicyList + plural: egressnetworkpolicies + singular: egressnetworkpolicy + scope: Namespaced + validation: + # This should be mostly equivalent to ValidateEgressNetworkPolicy + openAPIV3Schema: + description: EgressNetworkPolicy describes the current egress network policy + for a Namespace. When using the 'redhat/openshift-ovs-multitenant' network + plugin, traffic from a pod to an IP address outside the cluster will be checked + against each EgressNetworkPolicyRule in the pod's namespace's EgressNetworkPolicy, + in order. If no rule matches (or no EgressNetworkPolicy is present) then the + traffic will be allowed by default. + type: object + required: + - spec + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation + of an object. Servers should convert recognized schemas to the latest + internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource this + object represents. Servers may infer this from the endpoint the client + submits requests to. Cannot be updated. In CamelCase. 
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + metadata: + type: object + spec: + description: spec is the specification of the current egress network policy + type: object + required: + - egress + properties: + egress: + description: egress contains the list of egress policy rules + type: array + items: + description: EgressNetworkPolicyRule contains a single egress network + policy rule + type: object + required: + - to + - type + properties: + to: + description: to is the target that traffic is allowed/denied to + type: object + properties: + cidrSelector: + description: CIDRSelector is the CIDR range to allow/deny + traffic to. If this is set, dnsName must be unset. Ideally + we would have liked to use the cidr openapi format for this + property. But openshift-sdn only supports v4 while specifying + the cidr format allows both v4 and v6 cidrs. We are therefore + using a regex pattern to validate instead. + type: string + pattern: ^(([0-9]|[0-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5])\.){3}([0-9]|[0-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5])/([0-9]|[12][0-9]|3[0-2])$ + dnsName: + description: DNSName is the domain name to allow/deny traffic + to. If this is set, cidrSelector must be unset + type: string + pattern: ^([A-Za-z0-9-]+\.)*[A-Za-z0-9-]+\.?$ + type: + description: type marks this as an "Allow" or "Deny" rule + type: string + pattern: ^(Allow|Deny)$ + version: v1 + versions: + - name: v1 + served: true + storage: true +status: + acceptedNames: + kind: "" + plural: "" + conditions: [] + storedVersions: [] diff --git a/vendor/github.com/openshift/api/network/v1/constants.go b/vendor/github.com/openshift/api/network/v1/constants.go new file mode 100644 index 0000000000000000000000000000000000000000..54c06f331909e5bf028d6954b51509621b746602 --- /dev/null +++ b/vendor/github.com/openshift/api/network/v1/constants.go @@ -0,0 +1,17 @@ +package v1 + +const ( + // Pod annotations + AssignMacvlanAnnotation = "pod.network.openshift.io/assign-macvlan" + + // HostSubnet annotations. (Note: should be "hostsubnet.network.openshift.io/", but the incorrect name is now part of the API.) + AssignHostSubnetAnnotation = "pod.network.openshift.io/assign-subnet" + FixedVNIDHostAnnotation = "pod.network.openshift.io/fixed-vnid-host" + NodeUIDAnnotation = "pod.network.openshift.io/node-uid" + + // NetNamespace annotations + MulticastEnabledAnnotation = "netnamespace.network.openshift.io/multicast-enabled" + + // ChangePodNetworkAnnotation is an annotation on NetNamespace to request change of pod network + ChangePodNetworkAnnotation string = "pod.network.openshift.io/multitenant.change-network" +) diff --git a/vendor/github.com/openshift/api/network/v1/doc.go b/vendor/github.com/openshift/api/network/v1/doc.go new file mode 100644 index 0000000000000000000000000000000000000000..2816420d9688c8c9a626c7eb6b43dcf2dc3d51cc --- /dev/null +++ b/vendor/github.com/openshift/api/network/v1/doc.go @@ -0,0 +1,8 @@ +// +k8s:deepcopy-gen=package,register +// +k8s:conversion-gen=github.com/openshift/origin/pkg/network/apis/network +// +k8s:defaulter-gen=TypeMeta +// +k8s:openapi-gen=true + +// +groupName=network.openshift.io +// Package v1 is the v1 version of the API. 
+package v1 diff --git a/vendor/github.com/openshift/api/network/v1/generated.pb.go b/vendor/github.com/openshift/api/network/v1/generated.pb.go new file mode 100644 index 0000000000000000000000000000000000000000..34073bd902c32d89cbbc4eb5ae68130181958f90 --- /dev/null +++ b/vendor/github.com/openshift/api/network/v1/generated.pb.go @@ -0,0 +1,3248 @@ +// Code generated by protoc-gen-gogo. DO NOT EDIT. +// source: github.com/openshift/api/network/v1/generated.proto + +package v1 + +import ( + fmt "fmt" + + io "io" + + proto "github.com/gogo/protobuf/proto" + + math "math" + math_bits "math/bits" + reflect "reflect" + strings "strings" +) + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.GoGoProtoPackageIsVersion2 // please upgrade the proto package + +func (m *ClusterNetwork) Reset() { *m = ClusterNetwork{} } +func (*ClusterNetwork) ProtoMessage() {} +func (*ClusterNetwork) Descriptor() ([]byte, []int) { + return fileDescriptor_38d1cb27735fa5d9, []int{0} +} +func (m *ClusterNetwork) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ClusterNetwork) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *ClusterNetwork) XXX_Merge(src proto.Message) { + xxx_messageInfo_ClusterNetwork.Merge(m, src) +} +func (m *ClusterNetwork) XXX_Size() int { + return m.Size() +} +func (m *ClusterNetwork) XXX_DiscardUnknown() { + xxx_messageInfo_ClusterNetwork.DiscardUnknown(m) +} + +var xxx_messageInfo_ClusterNetwork proto.InternalMessageInfo + +func (m *ClusterNetworkEntry) Reset() { *m = ClusterNetworkEntry{} } +func (*ClusterNetworkEntry) ProtoMessage() {} +func (*ClusterNetworkEntry) Descriptor() ([]byte, []int) { + return fileDescriptor_38d1cb27735fa5d9, []int{1} +} +func (m *ClusterNetworkEntry) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ClusterNetworkEntry) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *ClusterNetworkEntry) XXX_Merge(src proto.Message) { + xxx_messageInfo_ClusterNetworkEntry.Merge(m, src) +} +func (m *ClusterNetworkEntry) XXX_Size() int { + return m.Size() +} +func (m *ClusterNetworkEntry) XXX_DiscardUnknown() { + xxx_messageInfo_ClusterNetworkEntry.DiscardUnknown(m) +} + +var xxx_messageInfo_ClusterNetworkEntry proto.InternalMessageInfo + +func (m *ClusterNetworkList) Reset() { *m = ClusterNetworkList{} } +func (*ClusterNetworkList) ProtoMessage() {} +func (*ClusterNetworkList) Descriptor() ([]byte, []int) { + return fileDescriptor_38d1cb27735fa5d9, []int{2} +} +func (m *ClusterNetworkList) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ClusterNetworkList) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *ClusterNetworkList) XXX_Merge(src proto.Message) { + xxx_messageInfo_ClusterNetworkList.Merge(m, src) +} +func (m *ClusterNetworkList) XXX_Size() int { + return 
m.Size() +} +func (m *ClusterNetworkList) XXX_DiscardUnknown() { + xxx_messageInfo_ClusterNetworkList.DiscardUnknown(m) +} + +var xxx_messageInfo_ClusterNetworkList proto.InternalMessageInfo + +func (m *EgressNetworkPolicy) Reset() { *m = EgressNetworkPolicy{} } +func (*EgressNetworkPolicy) ProtoMessage() {} +func (*EgressNetworkPolicy) Descriptor() ([]byte, []int) { + return fileDescriptor_38d1cb27735fa5d9, []int{3} +} +func (m *EgressNetworkPolicy) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *EgressNetworkPolicy) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *EgressNetworkPolicy) XXX_Merge(src proto.Message) { + xxx_messageInfo_EgressNetworkPolicy.Merge(m, src) +} +func (m *EgressNetworkPolicy) XXX_Size() int { + return m.Size() +} +func (m *EgressNetworkPolicy) XXX_DiscardUnknown() { + xxx_messageInfo_EgressNetworkPolicy.DiscardUnknown(m) +} + +var xxx_messageInfo_EgressNetworkPolicy proto.InternalMessageInfo + +func (m *EgressNetworkPolicyList) Reset() { *m = EgressNetworkPolicyList{} } +func (*EgressNetworkPolicyList) ProtoMessage() {} +func (*EgressNetworkPolicyList) Descriptor() ([]byte, []int) { + return fileDescriptor_38d1cb27735fa5d9, []int{4} +} +func (m *EgressNetworkPolicyList) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *EgressNetworkPolicyList) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *EgressNetworkPolicyList) XXX_Merge(src proto.Message) { + xxx_messageInfo_EgressNetworkPolicyList.Merge(m, src) +} +func (m *EgressNetworkPolicyList) XXX_Size() int { + return m.Size() +} +func (m *EgressNetworkPolicyList) XXX_DiscardUnknown() { + xxx_messageInfo_EgressNetworkPolicyList.DiscardUnknown(m) +} + +var xxx_messageInfo_EgressNetworkPolicyList proto.InternalMessageInfo + +func (m *EgressNetworkPolicyPeer) Reset() { *m = EgressNetworkPolicyPeer{} } +func (*EgressNetworkPolicyPeer) ProtoMessage() {} +func (*EgressNetworkPolicyPeer) Descriptor() ([]byte, []int) { + return fileDescriptor_38d1cb27735fa5d9, []int{5} +} +func (m *EgressNetworkPolicyPeer) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *EgressNetworkPolicyPeer) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *EgressNetworkPolicyPeer) XXX_Merge(src proto.Message) { + xxx_messageInfo_EgressNetworkPolicyPeer.Merge(m, src) +} +func (m *EgressNetworkPolicyPeer) XXX_Size() int { + return m.Size() +} +func (m *EgressNetworkPolicyPeer) XXX_DiscardUnknown() { + xxx_messageInfo_EgressNetworkPolicyPeer.DiscardUnknown(m) +} + +var xxx_messageInfo_EgressNetworkPolicyPeer proto.InternalMessageInfo + +func (m *EgressNetworkPolicyRule) Reset() { *m = EgressNetworkPolicyRule{} } +func (*EgressNetworkPolicyRule) ProtoMessage() {} +func (*EgressNetworkPolicyRule) Descriptor() ([]byte, []int) { + return fileDescriptor_38d1cb27735fa5d9, []int{6} +} +func (m *EgressNetworkPolicyRule) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *EgressNetworkPolicyRule) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m 
*EgressNetworkPolicyRule) XXX_Merge(src proto.Message) { + xxx_messageInfo_EgressNetworkPolicyRule.Merge(m, src) +} +func (m *EgressNetworkPolicyRule) XXX_Size() int { + return m.Size() +} +func (m *EgressNetworkPolicyRule) XXX_DiscardUnknown() { + xxx_messageInfo_EgressNetworkPolicyRule.DiscardUnknown(m) +} + +var xxx_messageInfo_EgressNetworkPolicyRule proto.InternalMessageInfo + +func (m *EgressNetworkPolicySpec) Reset() { *m = EgressNetworkPolicySpec{} } +func (*EgressNetworkPolicySpec) ProtoMessage() {} +func (*EgressNetworkPolicySpec) Descriptor() ([]byte, []int) { + return fileDescriptor_38d1cb27735fa5d9, []int{7} +} +func (m *EgressNetworkPolicySpec) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *EgressNetworkPolicySpec) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *EgressNetworkPolicySpec) XXX_Merge(src proto.Message) { + xxx_messageInfo_EgressNetworkPolicySpec.Merge(m, src) +} +func (m *EgressNetworkPolicySpec) XXX_Size() int { + return m.Size() +} +func (m *EgressNetworkPolicySpec) XXX_DiscardUnknown() { + xxx_messageInfo_EgressNetworkPolicySpec.DiscardUnknown(m) +} + +var xxx_messageInfo_EgressNetworkPolicySpec proto.InternalMessageInfo + +func (m *HostSubnet) Reset() { *m = HostSubnet{} } +func (*HostSubnet) ProtoMessage() {} +func (*HostSubnet) Descriptor() ([]byte, []int) { + return fileDescriptor_38d1cb27735fa5d9, []int{8} +} +func (m *HostSubnet) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *HostSubnet) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *HostSubnet) XXX_Merge(src proto.Message) { + xxx_messageInfo_HostSubnet.Merge(m, src) +} +func (m *HostSubnet) XXX_Size() int { + return m.Size() +} +func (m *HostSubnet) XXX_DiscardUnknown() { + xxx_messageInfo_HostSubnet.DiscardUnknown(m) +} + +var xxx_messageInfo_HostSubnet proto.InternalMessageInfo + +func (m *HostSubnetList) Reset() { *m = HostSubnetList{} } +func (*HostSubnetList) ProtoMessage() {} +func (*HostSubnetList) Descriptor() ([]byte, []int) { + return fileDescriptor_38d1cb27735fa5d9, []int{9} +} +func (m *HostSubnetList) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *HostSubnetList) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *HostSubnetList) XXX_Merge(src proto.Message) { + xxx_messageInfo_HostSubnetList.Merge(m, src) +} +func (m *HostSubnetList) XXX_Size() int { + return m.Size() +} +func (m *HostSubnetList) XXX_DiscardUnknown() { + xxx_messageInfo_HostSubnetList.DiscardUnknown(m) +} + +var xxx_messageInfo_HostSubnetList proto.InternalMessageInfo + +func (m *NetNamespace) Reset() { *m = NetNamespace{} } +func (*NetNamespace) ProtoMessage() {} +func (*NetNamespace) Descriptor() ([]byte, []int) { + return fileDescriptor_38d1cb27735fa5d9, []int{10} +} +func (m *NetNamespace) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *NetNamespace) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *NetNamespace) XXX_Merge(src proto.Message) { + xxx_messageInfo_NetNamespace.Merge(m, src) 
+} +func (m *NetNamespace) XXX_Size() int { + return m.Size() +} +func (m *NetNamespace) XXX_DiscardUnknown() { + xxx_messageInfo_NetNamespace.DiscardUnknown(m) +} + +var xxx_messageInfo_NetNamespace proto.InternalMessageInfo + +func (m *NetNamespaceList) Reset() { *m = NetNamespaceList{} } +func (*NetNamespaceList) ProtoMessage() {} +func (*NetNamespaceList) Descriptor() ([]byte, []int) { + return fileDescriptor_38d1cb27735fa5d9, []int{11} +} +func (m *NetNamespaceList) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *NetNamespaceList) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *NetNamespaceList) XXX_Merge(src proto.Message) { + xxx_messageInfo_NetNamespaceList.Merge(m, src) +} +func (m *NetNamespaceList) XXX_Size() int { + return m.Size() +} +func (m *NetNamespaceList) XXX_DiscardUnknown() { + xxx_messageInfo_NetNamespaceList.DiscardUnknown(m) +} + +var xxx_messageInfo_NetNamespaceList proto.InternalMessageInfo + +func init() { + proto.RegisterType((*ClusterNetwork)(nil), "github.com.openshift.api.network.v1.ClusterNetwork") + proto.RegisterType((*ClusterNetworkEntry)(nil), "github.com.openshift.api.network.v1.ClusterNetworkEntry") + proto.RegisterType((*ClusterNetworkList)(nil), "github.com.openshift.api.network.v1.ClusterNetworkList") + proto.RegisterType((*EgressNetworkPolicy)(nil), "github.com.openshift.api.network.v1.EgressNetworkPolicy") + proto.RegisterType((*EgressNetworkPolicyList)(nil), "github.com.openshift.api.network.v1.EgressNetworkPolicyList") + proto.RegisterType((*EgressNetworkPolicyPeer)(nil), "github.com.openshift.api.network.v1.EgressNetworkPolicyPeer") + proto.RegisterType((*EgressNetworkPolicyRule)(nil), "github.com.openshift.api.network.v1.EgressNetworkPolicyRule") + proto.RegisterType((*EgressNetworkPolicySpec)(nil), "github.com.openshift.api.network.v1.EgressNetworkPolicySpec") + proto.RegisterType((*HostSubnet)(nil), "github.com.openshift.api.network.v1.HostSubnet") + proto.RegisterType((*HostSubnetList)(nil), "github.com.openshift.api.network.v1.HostSubnetList") + proto.RegisterType((*NetNamespace)(nil), "github.com.openshift.api.network.v1.NetNamespace") + proto.RegisterType((*NetNamespaceList)(nil), "github.com.openshift.api.network.v1.NetNamespaceList") +} + +func init() { + proto.RegisterFile("github.com/openshift/api/network/v1/generated.proto", fileDescriptor_38d1cb27735fa5d9) +} + +var fileDescriptor_38d1cb27735fa5d9 = []byte{ + // 995 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xbc, 0x56, 0x4f, 0x6f, 0xe3, 0x44, + 0x14, 0x8f, 0xf3, 0xa7, 0x6d, 0x26, 0x6d, 0x5a, 0xcd, 0x56, 0xac, 0x29, 0x52, 0x12, 0xb9, 0x02, + 0x82, 0x56, 0xd8, 0xa4, 0x8b, 0x50, 0x0f, 0x08, 0xb4, 0x6e, 0x2b, 0x6d, 0xa4, 0x6e, 0x88, 0x26, + 0x65, 0x55, 0x21, 0x40, 0xb8, 0xce, 0xac, 0x63, 0x9a, 0xd8, 0x96, 0x67, 0x12, 0x88, 0x10, 0x7f, + 0x2e, 0xdc, 0xf9, 0x00, 0x7c, 0x0c, 0x3e, 0x02, 0x87, 0x1e, 0x38, 0xec, 0x09, 0xf6, 0x14, 0x51, + 0x73, 0xe7, 0x03, 0xf4, 0x84, 0x66, 0x3c, 0x8e, 0xed, 0xac, 0x2b, 0xa2, 0x22, 0x72, 0x4a, 0xe6, + 0xfd, 0xde, 0xdf, 0xf9, 0xbd, 0xf7, 0xc6, 0xe0, 0xa1, 0x65, 0xd3, 0xc1, 0xf8, 0x42, 0x35, 0xdd, + 0x91, 0xe6, 0x7a, 0xd8, 0x21, 0x03, 0xfb, 0x19, 0xd5, 0x0c, 0xcf, 0xd6, 0x1c, 0x4c, 0xbf, 0x72, + 0xfd, 0x4b, 0x6d, 0xd2, 0xd2, 0x2c, 0xec, 0x60, 0xdf, 0xa0, 0xb8, 0xaf, 0x7a, 0xbe, 0x4b, 0x5d, + 0xb8, 0x1f, 0x1b, 0xa9, 0x73, 0x23, 0xd5, 0xf0, 0x6c, 
0x55, 0x18, 0xa9, 0x93, 0xd6, 0xde, 0xdb, + 0x09, 0xcf, 0x96, 0x6b, 0xb9, 0x1a, 0xb7, 0xbd, 0x18, 0x3f, 0xe3, 0x27, 0x7e, 0xe0, 0xff, 0x42, + 0x9f, 0x7b, 0xef, 0x5e, 0x1e, 0x12, 0xd5, 0x76, 0x59, 0xe8, 0x91, 0x61, 0x0e, 0x6c, 0x07, 0xfb, + 0x53, 0xcd, 0xbb, 0xb4, 0x98, 0x80, 0x68, 0x23, 0x4c, 0x8d, 0x8c, 0x4c, 0xf6, 0xde, 0xbb, 0xcd, + 0xca, 0x1f, 0x3b, 0xd4, 0x1e, 0x61, 0x8d, 0x98, 0x03, 0x3c, 0x32, 0x16, 0xed, 0x94, 0x9f, 0x8b, + 0xa0, 0x7a, 0x34, 0x1c, 0x13, 0x8a, 0xfd, 0x4e, 0x98, 0x32, 0xfc, 0x02, 0x6c, 0xb0, 0x28, 0x7d, + 0x83, 0x1a, 0xb2, 0xd4, 0x90, 0x9a, 0x95, 0x83, 0x77, 0xd4, 0xd0, 0xbb, 0x9a, 0xf4, 0xae, 0x7a, + 0x97, 0x16, 0x13, 0x10, 0x95, 0x69, 0xab, 0x93, 0x96, 0xfa, 0xd1, 0xc5, 0x97, 0xd8, 0xa4, 0x4f, + 0x30, 0x35, 0x74, 0x78, 0x35, 0xab, 0xe7, 0x82, 0x59, 0x1d, 0xc4, 0x32, 0x34, 0xf7, 0x0a, 0xdf, + 0x02, 0xeb, 0xe2, 0x7e, 0xe4, 0x7c, 0x43, 0x6a, 0x96, 0xf5, 0x6d, 0xa1, 0xbe, 0x2e, 0x72, 0x40, + 0x11, 0x0e, 0x8f, 0xc1, 0xce, 0xc0, 0x25, 0x94, 0x8c, 0x2f, 0x1c, 0x4c, 0x87, 0xd8, 0xb1, 0xe8, + 0x40, 0x2e, 0x34, 0xa4, 0xe6, 0x96, 0x2e, 0x0b, 0x9b, 0x9d, 0xc7, 0x2e, 0xa1, 0x3d, 0x8e, 0x9f, + 0x72, 0x1c, 0xbd, 0x64, 0x01, 0x3f, 0x00, 0x55, 0x82, 0xfd, 0x89, 0x6d, 0x62, 0x11, 0x40, 0x2e, + 0xf2, 0xb8, 0xaf, 0x08, 0x1f, 0xd5, 0x5e, 0x0a, 0x45, 0x0b, 0xda, 0xf0, 0x00, 0x00, 0x6f, 0x38, + 0xb6, 0x6c, 0xa7, 0x63, 0x8c, 0xb0, 0x5c, 0xe2, 0xb6, 0xf3, 0x12, 0xbb, 0x73, 0x04, 0x25, 0xb4, + 0xe0, 0x37, 0x60, 0xdb, 0x4c, 0x5d, 0x2c, 0x91, 0xd7, 0x1a, 0x85, 0x66, 0xe5, 0xe0, 0x50, 0x5d, + 0xa2, 0x6b, 0xd4, 0x34, 0x29, 0x27, 0x0e, 0xf5, 0xa7, 0xfa, 0x7d, 0x11, 0x72, 0x3b, 0x0d, 0x12, + 0xb4, 0x18, 0x09, 0x3e, 0x00, 0xe5, 0xc9, 0xd7, 0x43, 0xc3, 0xe9, 0xba, 0x3e, 0x95, 0xd7, 0xf9, + 0x7d, 0x6d, 0x05, 0xb3, 0x7a, 0xf9, 0xe9, 0xf9, 0xe9, 0xa3, 0x0e, 0x13, 0xa2, 0x18, 0x87, 0xaf, + 0x82, 0xc2, 0x88, 0x8e, 0xe5, 0x0d, 0xae, 0xb6, 0x1e, 0xcc, 0xea, 0x85, 0x27, 0x67, 0x1f, 0x23, + 0x26, 0x53, 0xbe, 0x05, 0xf7, 0x32, 0x12, 0x81, 0x0d, 0x50, 0x34, 0xed, 0xbe, 0xcf, 0xdb, 0xa3, + 0xac, 0x6f, 0x8a, 0xb4, 0x8a, 0x47, 0xed, 0x63, 0x84, 0x38, 0x12, 0xf1, 0x96, 0xe4, 0x85, 0x73, + 0xfd, 0xaf, 0xbc, 0x25, 0x25, 0xca, 0x6f, 0x12, 0x80, 0xe9, 0xf8, 0xa7, 0x36, 0xa1, 0xf0, 0xd3, + 0x97, 0x3a, 0x54, 0x5d, 0xae, 0x43, 0x99, 0x35, 0xef, 0xcf, 0x1d, 0x91, 0xc4, 0x46, 0x24, 0x49, + 0x74, 0xe7, 0x39, 0x28, 0xd9, 0x14, 0x8f, 0x88, 0x9c, 0xe7, 0x74, 0x3d, 0xbc, 0x03, 0x5d, 0xfa, + 0x96, 0xf0, 0x5f, 0x6a, 0x33, 0x4f, 0x28, 0x74, 0xa8, 0xfc, 0x21, 0x81, 0x7b, 0x27, 0x96, 0x8f, + 0x09, 0x11, 0x7a, 0x5d, 0x77, 0x68, 0x9b, 0xd3, 0x15, 0x4c, 0xdc, 0xe7, 0xa0, 0x48, 0x3c, 0x6c, + 0x72, 0x0a, 0x2a, 0x07, 0xef, 0x2f, 0x55, 0x52, 0x46, 0xa6, 0x3d, 0x0f, 0x9b, 0x31, 0xdd, 0xec, + 0x84, 0xb8, 0x5f, 0xe5, 0x77, 0x09, 0xdc, 0xcf, 0xd0, 0x5f, 0x01, 0x5b, 0x9f, 0xa5, 0xd9, 0x3a, + 0xbc, 0x6b, 0x69, 0xb7, 0x50, 0xf6, 0x5d, 0x66, 0x5d, 0x5d, 0x8c, 0x7d, 0x78, 0x08, 0x36, 0x59, + 0xab, 0xf7, 0xf0, 0x10, 0x9b, 0xd4, 0x8d, 0x86, 0x61, 0x57, 0xb8, 0xd9, 0x64, 0xc3, 0x10, 0x61, + 0x28, 0xa5, 0xc9, 0xf6, 0x5f, 0xdf, 0x21, 0x7c, 0x97, 0x2c, 0xec, 0xbf, 0xe3, 0x4e, 0x8f, 0x2f, + 0x92, 0x08, 0x57, 0x7e, 0xc9, 0xbe, 0x58, 0x34, 0x1e, 0x62, 0xf8, 0x21, 0x28, 0xd2, 0xa9, 0x87, + 0x45, 0xe0, 0x07, 0x11, 0x2d, 0x67, 0x53, 0x0f, 0xdf, 0xcc, 0xea, 0xaf, 0xdd, 0x62, 0xc6, 0x60, + 0xc4, 0x0d, 0xe1, 0x39, 0xc8, 0x53, 0xf7, 0xbf, 0xf6, 0x04, 0xbb, 0x0b, 0x1d, 0x88, 0xe0, 0xf9, + 0x33, 0x17, 0xe5, 0xa9, 0xab, 0x7c, 0x9f, 0x99, 0x35, 0x6b, 0x18, 0xd8, 0x07, 0x6b, 0x98, 0x43, + 0xb2, 0xc4, 0x19, 0xbb, 0x73, 0x60, 0x56, 0x8c, 0x5e, 0x15, 0x81, 0xd7, 0x42, 
0x05, 0x24, 0x7c, + 0x2b, 0x7f, 0xe7, 0x01, 0x88, 0x17, 0xcc, 0x0a, 0x26, 0xac, 0x01, 0x8a, 0x6c, 0x7d, 0x09, 0x42, + 0xe7, 0x33, 0xc2, 0x72, 0x40, 0x1c, 0x81, 0x6f, 0x80, 0x35, 0xf6, 0xdb, 0xee, 0xf2, 0x07, 0xac, + 0x1c, 0xa7, 0xfe, 0x98, 0x4b, 0x91, 0x40, 0x99, 0x5e, 0xf8, 0x78, 0x89, 0x47, 0x6a, 0xae, 0x17, + 0xd6, 0x82, 0x04, 0x0a, 0x1f, 0x81, 0x72, 0x58, 0x6c, 0xbb, 0x4b, 0xe4, 0x52, 0xa3, 0xd0, 0x2c, + 0xeb, 0xfb, 0x6c, 0xc7, 0x9f, 0x44, 0xc2, 0x9b, 0x59, 0x1d, 0xc6, 0x77, 0x10, 0x89, 0x51, 0x6c, + 0x05, 0xdb, 0xa0, 0x12, 0x1e, 0x58, 0xb3, 0x86, 0xef, 0x53, 0x59, 0x7f, 0x33, 0x98, 0xd5, 0x2b, + 0x27, 0xb1, 0xf8, 0x66, 0x56, 0xdf, 0x5d, 0x74, 0xc3, 0x37, 0x7d, 0xd2, 0x56, 0xf9, 0x55, 0x02, + 0xd5, 0xc4, 0x46, 0xff, 0xff, 0x07, 0xff, 0x2c, 0x3d, 0xf8, 0xda, 0x52, 0x6d, 0x14, 0x67, 0x78, + 0xcb, 0xbc, 0xff, 0x98, 0x07, 0x9b, 0x1d, 0x4c, 0xd9, 0xec, 0x11, 0xcf, 0x30, 0xf1, 0xca, 0xbe, + 0x86, 0x9c, 0x8c, 0x6d, 0x20, 0x12, 0x41, 0x11, 0x0e, 0xf7, 0x41, 0xc9, 0xc1, 0xd4, 0xee, 0x8b, + 0x4f, 0xa0, 0x79, 0x09, 0x1d, 0x4c, 0xdb, 0xc7, 0x28, 0xc4, 0xe0, 0x51, 0xb2, 0x2f, 0x8a, 0x9c, + 0xd2, 0xd7, 0x17, 0xfb, 0x62, 0x37, 0x59, 0x63, 0x46, 0x67, 0x28, 0x57, 0x12, 0xd8, 0x49, 0xea, + 0xac, 0x80, 0xd0, 0xa7, 0x69, 0x42, 0x5b, 0x4b, 0x11, 0x9a, 0xcc, 0x31, 0x9b, 0x52, 0xbd, 0x79, + 0x75, 0x5d, 0xcb, 0x3d, 0xbf, 0xae, 0xe5, 0x5e, 0x5c, 0xd7, 0x72, 0x3f, 0x04, 0x35, 0xe9, 0x2a, + 0xa8, 0x49, 0xcf, 0x83, 0x9a, 0xf4, 0x22, 0xa8, 0x49, 0x7f, 0x06, 0x35, 0xe9, 0xa7, 0xbf, 0x6a, + 0xb9, 0x4f, 0xf2, 0x93, 0xd6, 0x3f, 0x01, 0x00, 0x00, 0xff, 0xff, 0xc3, 0xa5, 0xdd, 0x7e, 0x04, + 0x0c, 0x00, 0x00, +} + +func (m *ClusterNetwork) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ClusterNetwork) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ClusterNetwork) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.MTU != nil { + i = encodeVarintGenerated(dAtA, i, uint64(*m.MTU)) + i-- + dAtA[i] = 0x40 + } + if m.VXLANPort != nil { + i = encodeVarintGenerated(dAtA, i, uint64(*m.VXLANPort)) + i-- + dAtA[i] = 0x38 + } + if len(m.ClusterNetworks) > 0 { + for iNdEx := len(m.ClusterNetworks) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.ClusterNetworks[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x32 + } + } + i -= len(m.PluginName) + copy(dAtA[i:], m.PluginName) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.PluginName))) + i-- + dAtA[i] = 0x2a + i -= len(m.ServiceNetwork) + copy(dAtA[i:], m.ServiceNetwork) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.ServiceNetwork))) + i-- + dAtA[i] = 0x22 + i = encodeVarintGenerated(dAtA, i, uint64(m.HostSubnetLength)) + i-- + dAtA[i] = 0x18 + i -= len(m.Network) + copy(dAtA[i:], m.Network) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Network))) + i-- + dAtA[i] = 0x12 + { + size, err := m.ObjectMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *ClusterNetworkEntry) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err 
+ } + return dAtA[:n], nil +} + +func (m *ClusterNetworkEntry) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ClusterNetworkEntry) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + i = encodeVarintGenerated(dAtA, i, uint64(m.HostSubnetLength)) + i-- + dAtA[i] = 0x10 + i -= len(m.CIDR) + copy(dAtA[i:], m.CIDR) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.CIDR))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *ClusterNetworkList) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ClusterNetworkList) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ClusterNetworkList) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.Items) > 0 { + for iNdEx := len(m.Items) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Items[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + } + { + size, err := m.ListMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *EgressNetworkPolicy) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *EgressNetworkPolicy) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *EgressNetworkPolicy) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + { + size, err := m.Spec.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + { + size, err := m.ObjectMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *EgressNetworkPolicyList) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *EgressNetworkPolicyList) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *EgressNetworkPolicyList) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.Items) > 0 { + for iNdEx := len(m.Items) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Items[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + } + { + size, err := m.ListMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *EgressNetworkPolicyPeer) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, 
size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *EgressNetworkPolicyPeer) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *EgressNetworkPolicyPeer) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + i -= len(m.DNSName) + copy(dAtA[i:], m.DNSName) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.DNSName))) + i-- + dAtA[i] = 0x12 + i -= len(m.CIDRSelector) + copy(dAtA[i:], m.CIDRSelector) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.CIDRSelector))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *EgressNetworkPolicyRule) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *EgressNetworkPolicyRule) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *EgressNetworkPolicyRule) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + { + size, err := m.To.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + i -= len(m.Type) + copy(dAtA[i:], m.Type) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Type))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *EgressNetworkPolicySpec) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *EgressNetworkPolicySpec) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *EgressNetworkPolicySpec) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.Egress) > 0 { + for iNdEx := len(m.Egress) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Egress[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + } + } + return len(dAtA) - i, nil +} + +func (m *HostSubnet) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *HostSubnet) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *HostSubnet) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.EgressCIDRs) > 0 { + for iNdEx := len(m.EgressCIDRs) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.EgressCIDRs[iNdEx]) + copy(dAtA[i:], m.EgressCIDRs[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.EgressCIDRs[iNdEx]))) + i-- + dAtA[i] = 0x32 + } + } + if len(m.EgressIPs) > 0 { + for iNdEx := len(m.EgressIPs) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.EgressIPs[iNdEx]) + copy(dAtA[i:], m.EgressIPs[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.EgressIPs[iNdEx]))) + i-- + dAtA[i] = 0x2a + } + } + i -= len(m.Subnet) + copy(dAtA[i:], m.Subnet) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Subnet))) + i-- + dAtA[i] = 0x22 + i -= len(m.HostIP) + copy(dAtA[i:], m.HostIP) + i = 
encodeVarintGenerated(dAtA, i, uint64(len(m.HostIP))) + i-- + dAtA[i] = 0x1a + i -= len(m.Host) + copy(dAtA[i:], m.Host) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Host))) + i-- + dAtA[i] = 0x12 + { + size, err := m.ObjectMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *HostSubnetList) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *HostSubnetList) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *HostSubnetList) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.Items) > 0 { + for iNdEx := len(m.Items) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Items[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + } + { + size, err := m.ListMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *NetNamespace) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *NetNamespace) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *NetNamespace) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.EgressIPs) > 0 { + for iNdEx := len(m.EgressIPs) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.EgressIPs[iNdEx]) + copy(dAtA[i:], m.EgressIPs[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.EgressIPs[iNdEx]))) + i-- + dAtA[i] = 0x22 + } + } + i = encodeVarintGenerated(dAtA, i, uint64(m.NetID)) + i-- + dAtA[i] = 0x18 + i -= len(m.NetName) + copy(dAtA[i:], m.NetName) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.NetName))) + i-- + dAtA[i] = 0x12 + { + size, err := m.ObjectMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *NetNamespaceList) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *NetNamespaceList) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *NetNamespaceList) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.Items) > 0 { + for iNdEx := len(m.Items) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Items[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + } + { + size, err := m.ListMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + 
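[Editor's note] The MarshalToSizedBuffer methods above all fill the buffer back-to-front: each field writes its payload first, then its length as a varint, then a one-byte tag (0xa = field 1 with wire type 2, 0x12 = field 2, 0x1a = field 3, and so on), with encodeVarintGenerated (next hunk) emitting the varints. Below is a minimal, self-contained sketch of that scheme, not part of the vendored file; sizeOfVarint and putVarintBackward are hypothetical names mirroring sovGenerated and encodeVarintGenerated:

package main

import (
	"fmt"
	"math/bits"
)

// sizeOfVarint mirrors sovGenerated: ceil(bit-length / 7), minimum one byte.
func sizeOfVarint(v uint64) int { return (bits.Len64(v|1) + 6) / 7 }

// putVarintBackward mirrors encodeVarintGenerated: the encoding of v is
// written so that it *ends* at offset, and the new start offset is returned.
func putVarintBackward(buf []byte, offset int, v uint64) int {
	offset -= sizeOfVarint(v)
	base := offset
	for v >= 1<<7 {
		buf[offset] = uint8(v&0x7f | 0x80)
		v >>= 7
		offset++
	}
	buf[offset] = uint8(v)
	return base
}

func main() {
	// Field 1 of ClusterNetworkEntry (cidr, wire type 2, tag byte 0xa),
	// encoded back-to-front the way MarshalToSizedBuffer does it. The CIDR
	// value here is illustrative only.
	cidr := "10.128.0.0/14"
	size := 1 + sizeOfVarint(uint64(len(cidr))) + len(cidr)
	buf := make([]byte, size)
	i := len(buf)
	i -= len(cidr)
	copy(buf[i:], cidr)
	i = putVarintBackward(buf, i, uint64(len(cidr)))
	i--
	buf[i] = 0xa
	fmt.Printf("%x\n", buf) // 0a0d followed by the ASCII bytes of the CIDR
}

Writing back-to-front is a deliberate design choice in this generator: since Size() has already computed every submessage's length, each length prefix can be written immediately after its payload without buffering children separately.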
+func encodeVarintGenerated(dAtA []byte, offset int, v uint64) int { + offset -= sovGenerated(v) + base := offset + for v >= 1<<7 { + dAtA[offset] = uint8(v&0x7f | 0x80) + v >>= 7 + offset++ + } + dAtA[offset] = uint8(v) + return base +} +func (m *ClusterNetwork) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.ObjectMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Network) + n += 1 + l + sovGenerated(uint64(l)) + n += 1 + sovGenerated(uint64(m.HostSubnetLength)) + l = len(m.ServiceNetwork) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.PluginName) + n += 1 + l + sovGenerated(uint64(l)) + if len(m.ClusterNetworks) > 0 { + for _, e := range m.ClusterNetworks { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + if m.VXLANPort != nil { + n += 1 + sovGenerated(uint64(*m.VXLANPort)) + } + if m.MTU != nil { + n += 1 + sovGenerated(uint64(*m.MTU)) + } + return n +} + +func (m *ClusterNetworkEntry) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.CIDR) + n += 1 + l + sovGenerated(uint64(l)) + n += 1 + sovGenerated(uint64(m.HostSubnetLength)) + return n +} + +func (m *ClusterNetworkList) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.ListMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + if len(m.Items) > 0 { + for _, e := range m.Items { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m *EgressNetworkPolicy) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.ObjectMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = m.Spec.Size() + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *EgressNetworkPolicyList) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.ListMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + if len(m.Items) > 0 { + for _, e := range m.Items { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m *EgressNetworkPolicyPeer) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.CIDRSelector) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.DNSName) + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *EgressNetworkPolicyRule) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Type) + n += 1 + l + sovGenerated(uint64(l)) + l = m.To.Size() + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *EgressNetworkPolicySpec) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.Egress) > 0 { + for _, e := range m.Egress { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m *HostSubnet) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.ObjectMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Host) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.HostIP) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Subnet) + n += 1 + l + sovGenerated(uint64(l)) + if len(m.EgressIPs) > 0 { + for _, s := range m.EgressIPs { + l = len(s) + n += 1 + l + sovGenerated(uint64(l)) + } + } + if len(m.EgressCIDRs) > 0 { + for _, s := range m.EgressCIDRs { + l = len(s) + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m *HostSubnetList) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.ListMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + if len(m.Items) > 0 { + for _, e := range m.Items { + l = e.Size() + n += 1 + l + 
sovGenerated(uint64(l)) + } + } + return n +} + +func (m *NetNamespace) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.ObjectMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.NetName) + n += 1 + l + sovGenerated(uint64(l)) + n += 1 + sovGenerated(uint64(m.NetID)) + if len(m.EgressIPs) > 0 { + for _, s := range m.EgressIPs { + l = len(s) + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m *NetNamespaceList) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.ListMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + if len(m.Items) > 0 { + for _, e := range m.Items { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func sovGenerated(x uint64) (n int) { + return (math_bits.Len64(x|1) + 6) / 7 +} +func sozGenerated(x uint64) (n int) { + return sovGenerated(uint64((x << 1) ^ uint64((int64(x) >> 63)))) +} +func (this *ClusterNetwork) String() string { + if this == nil { + return "nil" + } + repeatedStringForClusterNetworks := "[]ClusterNetworkEntry{" + for _, f := range this.ClusterNetworks { + repeatedStringForClusterNetworks += strings.Replace(strings.Replace(f.String(), "ClusterNetworkEntry", "ClusterNetworkEntry", 1), `&`, ``, 1) + "," + } + repeatedStringForClusterNetworks += "}" + s := strings.Join([]string{`&ClusterNetwork{`, + `ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v1.ObjectMeta", 1), `&`, ``, 1) + `,`, + `Network:` + fmt.Sprintf("%v", this.Network) + `,`, + `HostSubnetLength:` + fmt.Sprintf("%v", this.HostSubnetLength) + `,`, + `ServiceNetwork:` + fmt.Sprintf("%v", this.ServiceNetwork) + `,`, + `PluginName:` + fmt.Sprintf("%v", this.PluginName) + `,`, + `ClusterNetworks:` + repeatedStringForClusterNetworks + `,`, + `VXLANPort:` + valueToStringGenerated(this.VXLANPort) + `,`, + `MTU:` + valueToStringGenerated(this.MTU) + `,`, + `}`, + }, "") + return s +} +func (this *ClusterNetworkEntry) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&ClusterNetworkEntry{`, + `CIDR:` + fmt.Sprintf("%v", this.CIDR) + `,`, + `HostSubnetLength:` + fmt.Sprintf("%v", this.HostSubnetLength) + `,`, + `}`, + }, "") + return s +} +func (this *ClusterNetworkList) String() string { + if this == nil { + return "nil" + } + repeatedStringForItems := "[]ClusterNetwork{" + for _, f := range this.Items { + repeatedStringForItems += strings.Replace(strings.Replace(f.String(), "ClusterNetwork", "ClusterNetwork", 1), `&`, ``, 1) + "," + } + repeatedStringForItems += "}" + s := strings.Join([]string{`&ClusterNetworkList{`, + `ListMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ListMeta), "ListMeta", "v1.ListMeta", 1), `&`, ``, 1) + `,`, + `Items:` + repeatedStringForItems + `,`, + `}`, + }, "") + return s +} +func (this *EgressNetworkPolicy) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&EgressNetworkPolicy{`, + `ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v1.ObjectMeta", 1), `&`, ``, 1) + `,`, + `Spec:` + strings.Replace(strings.Replace(this.Spec.String(), "EgressNetworkPolicySpec", "EgressNetworkPolicySpec", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *EgressNetworkPolicyList) String() string { + if this == nil { + return "nil" + } + repeatedStringForItems := "[]EgressNetworkPolicy{" + for _, f := range this.Items { + repeatedStringForItems += 
strings.Replace(strings.Replace(f.String(), "EgressNetworkPolicy", "EgressNetworkPolicy", 1), `&`, ``, 1) + "," + } + repeatedStringForItems += "}" + s := strings.Join([]string{`&EgressNetworkPolicyList{`, + `ListMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ListMeta), "ListMeta", "v1.ListMeta", 1), `&`, ``, 1) + `,`, + `Items:` + repeatedStringForItems + `,`, + `}`, + }, "") + return s +} +func (this *EgressNetworkPolicyPeer) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&EgressNetworkPolicyPeer{`, + `CIDRSelector:` + fmt.Sprintf("%v", this.CIDRSelector) + `,`, + `DNSName:` + fmt.Sprintf("%v", this.DNSName) + `,`, + `}`, + }, "") + return s +} +func (this *EgressNetworkPolicyRule) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&EgressNetworkPolicyRule{`, + `Type:` + fmt.Sprintf("%v", this.Type) + `,`, + `To:` + strings.Replace(strings.Replace(this.To.String(), "EgressNetworkPolicyPeer", "EgressNetworkPolicyPeer", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *EgressNetworkPolicySpec) String() string { + if this == nil { + return "nil" + } + repeatedStringForEgress := "[]EgressNetworkPolicyRule{" + for _, f := range this.Egress { + repeatedStringForEgress += strings.Replace(strings.Replace(f.String(), "EgressNetworkPolicyRule", "EgressNetworkPolicyRule", 1), `&`, ``, 1) + "," + } + repeatedStringForEgress += "}" + s := strings.Join([]string{`&EgressNetworkPolicySpec{`, + `Egress:` + repeatedStringForEgress + `,`, + `}`, + }, "") + return s +} +func (this *HostSubnet) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&HostSubnet{`, + `ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v1.ObjectMeta", 1), `&`, ``, 1) + `,`, + `Host:` + fmt.Sprintf("%v", this.Host) + `,`, + `HostIP:` + fmt.Sprintf("%v", this.HostIP) + `,`, + `Subnet:` + fmt.Sprintf("%v", this.Subnet) + `,`, + `EgressIPs:` + fmt.Sprintf("%v", this.EgressIPs) + `,`, + `EgressCIDRs:` + fmt.Sprintf("%v", this.EgressCIDRs) + `,`, + `}`, + }, "") + return s +} +func (this *HostSubnetList) String() string { + if this == nil { + return "nil" + } + repeatedStringForItems := "[]HostSubnet{" + for _, f := range this.Items { + repeatedStringForItems += strings.Replace(strings.Replace(f.String(), "HostSubnet", "HostSubnet", 1), `&`, ``, 1) + "," + } + repeatedStringForItems += "}" + s := strings.Join([]string{`&HostSubnetList{`, + `ListMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ListMeta), "ListMeta", "v1.ListMeta", 1), `&`, ``, 1) + `,`, + `Items:` + repeatedStringForItems + `,`, + `}`, + }, "") + return s +} +func (this *NetNamespace) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&NetNamespace{`, + `ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v1.ObjectMeta", 1), `&`, ``, 1) + `,`, + `NetName:` + fmt.Sprintf("%v", this.NetName) + `,`, + `NetID:` + fmt.Sprintf("%v", this.NetID) + `,`, + `EgressIPs:` + fmt.Sprintf("%v", this.EgressIPs) + `,`, + `}`, + }, "") + return s +} +func (this *NetNamespaceList) String() string { + if this == nil { + return "nil" + } + repeatedStringForItems := "[]NetNamespace{" + for _, f := range this.Items { + repeatedStringForItems += strings.Replace(strings.Replace(f.String(), "NetNamespace", "NetNamespace", 1), `&`, ``, 1) + "," + } + repeatedStringForItems += "}" + s := 
strings.Join([]string{`&NetNamespaceList{`, + `ListMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ListMeta), "ListMeta", "v1.ListMeta", 1), `&`, ``, 1) + `,`, + `Items:` + repeatedStringForItems + `,`, + `}`, + }, "") + return s +} +func valueToStringGenerated(v interface{}) string { + rv := reflect.ValueOf(v) + if rv.IsNil() { + return "nil" + } + pv := reflect.Indirect(rv).Interface() + return fmt.Sprintf("*%v", pv) +} +func (m *ClusterNetwork) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ClusterNetwork: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ClusterNetwork: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ObjectMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Network", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Network = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field HostSubnetLength", wireType) + } + m.HostSubnetLength = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.HostSubnetLength |= uint32(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ServiceNetwork", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + 
m.ServiceNetwork = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field PluginName", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.PluginName = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 6: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ClusterNetworks", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ClusterNetworks = append(m.ClusterNetworks, ClusterNetworkEntry{}) + if err := m.ClusterNetworks[len(m.ClusterNetworks)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 7: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field VXLANPort", wireType) + } + var v uint32 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= uint32(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.VXLANPort = &v + case 8: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field MTU", wireType) + } + var v uint32 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= uint32(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.MTU = &v + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ClusterNetworkEntry) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ClusterNetworkEntry: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ClusterNetworkEntry: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field CIDR", wireType) + } + var stringLen uint64 + for shift 
:= uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.CIDR = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field HostSubnetLength", wireType) + } + m.HostSubnetLength = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.HostSubnetLength |= uint32(b&0x7F) << shift + if b < 0x80 { + break + } + } + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ClusterNetworkList) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ClusterNetworkList: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ClusterNetworkList: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ListMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ListMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Items = append(m.Items, ClusterNetwork{}) + if err := m.Items[len(m.Items)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } 
+ if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *EgressNetworkPolicy) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: EgressNetworkPolicy: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: EgressNetworkPolicy: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ObjectMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Spec", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Spec.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *EgressNetworkPolicyList) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: EgressNetworkPolicyList: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: EgressNetworkPolicyList: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ListMeta", wireType) + } + 
var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ListMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Items = append(m.Items, EgressNetworkPolicy{}) + if err := m.Items[len(m.Items)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *EgressNetworkPolicyPeer) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: EgressNetworkPolicyPeer: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: EgressNetworkPolicyPeer: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field CIDRSelector", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.CIDRSelector = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field DNSName", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return 
ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.DNSName = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *EgressNetworkPolicyRule) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: EgressNetworkPolicyRule: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: EgressNetworkPolicyRule: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Type", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Type = EgressNetworkPolicyRuleType(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field To", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.To.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *EgressNetworkPolicySpec) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := 
int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: EgressNetworkPolicySpec: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: EgressNetworkPolicySpec: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Egress", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Egress = append(m.Egress, EgressNetworkPolicyRule{}) + if err := m.Egress[len(m.Egress)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *HostSubnet) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: HostSubnet: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: HostSubnet: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ObjectMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Host", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Host = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field HostIP", 
wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.HostIP = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Subnet", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Subnet = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field EgressIPs", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.EgressIPs = append(m.EgressIPs, HostSubnetEgressIP(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + case 6: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field EgressCIDRs", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.EgressCIDRs = append(m.EgressCIDRs, HostSubnetEgressCIDR(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *HostSubnetList) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + 
wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: HostSubnetList: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: HostSubnetList: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ListMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ListMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Items = append(m.Items, HostSubnet{}) + if err := m.Items[len(m.Items)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *NetNamespace) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: NetNamespace: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: NetNamespace: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ObjectMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field NetName", wireType) + } + var 
stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.NetName = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field NetID", wireType) + } + m.NetID = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.NetID |= uint32(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field EgressIPs", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.EgressIPs = append(m.EgressIPs, NetNamespaceEgressIP(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *NetNamespaceList) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: NetNamespaceList: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: NetNamespaceList: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ListMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ListMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType) + } + var 
msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Items = append(m.Items, NetNamespace{}) + if err := m.Items[len(m.Items)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func skipGenerated(dAtA []byte) (n int, err error) { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowGenerated + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + wireType := int(wire & 0x7) + switch wireType { + case 0: + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowGenerated + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + iNdEx++ + if dAtA[iNdEx-1] < 0x80 { + break + } + } + return iNdEx, nil + case 1: + iNdEx += 8 + return iNdEx, nil + case 2: + var length int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowGenerated + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + length |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if length < 0 { + return 0, ErrInvalidLengthGenerated + } + iNdEx += length + if iNdEx < 0 { + return 0, ErrInvalidLengthGenerated + } + return iNdEx, nil + case 3: + for { + var innerWire uint64 + var start int = iNdEx + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowGenerated + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + innerWire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + innerWireType := int(innerWire & 0x7) + if innerWireType == 4 { + break + } + next, err := skipGenerated(dAtA[start:]) + if err != nil { + return 0, err + } + iNdEx = start + next + if iNdEx < 0 { + return 0, ErrInvalidLengthGenerated + } + } + return iNdEx, nil + case 4: + return iNdEx, nil + case 5: + iNdEx += 4 + return iNdEx, nil + default: + return 0, fmt.Errorf("proto: illegal wireType %d", wireType) + } + } + panic("unreachable") +} + +var ( + ErrInvalidLengthGenerated = fmt.Errorf("proto: negative length found during unmarshaling") + ErrIntOverflowGenerated = fmt.Errorf("proto: integer overflow") +) diff --git a/vendor/github.com/openshift/api/network/v1/generated.proto b/vendor/github.com/openshift/api/network/v1/generated.proto new file mode 100644 index 0000000000000000000000000000000000000000..e57003f4fcda398076d4c58d43fd836fa0c2261d --- /dev/null +++ b/vendor/github.com/openshift/api/network/v1/generated.proto @@ -0,0 +1,203 @@ + +// This file was autogenerated by go-to-protobuf. Do not edit it manually! 
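[Editor's note] The Unmarshal methods and skipGenerated above all share one decoding loop: read a varint key, split it into a field number (key >> 3) and a wire type (key & 0x7), then dispatch; unknown fields are handed to skipGenerated so old decoders tolerate new fields. A small standalone sketch of that dispatch over the ClusterNetworkEntry layout (field 1 = cidr, length-delimited; field 2 = hostSubnetLength, varint); readVarint is a hypothetical helper mirroring the inline loops in the vendored code:

package main

import (
	"fmt"
	"io"
)

// readVarint mirrors the inline loops in the Unmarshal methods: 7-bit
// groups, least significant first, high bit set on all but the last byte.
func readVarint(b []byte, i int) (uint64, int, error) {
	var v uint64
	for shift := uint(0); ; shift += 7 {
		if shift >= 64 {
			return 0, 0, fmt.Errorf("varint overflow")
		}
		if i >= len(b) {
			return 0, 0, io.ErrUnexpectedEOF
		}
		c := b[i]
		i++
		v |= uint64(c&0x7f) << shift
		if c < 0x80 {
			return v, i, nil
		}
	}
}

func main() {
	// An illustrative ClusterNetworkEntry payload: field 1 (cidr, wire
	// type 2) followed by field 2 (hostSubnetLength, wire type 0).
	msg := append([]byte{0xa, 0xd}, "10.128.0.0/14"...)
	msg = append(msg, 0x10, 0x9)
	for i := 0; i < len(msg); {
		key, n, err := readVarint(msg, i)
		if err != nil {
			panic(err)
		}
		i = n
		fieldNum, wireType := key>>3, key&0x7
		switch wireType {
		case 0: // varint field, e.g. hostSubnetLength
			v, n, _ := readVarint(msg, i)
			i = n
			fmt.Printf("field %d: varint %d\n", fieldNum, v)
		case 2: // length-delimited field, e.g. cidr
			l, n, _ := readVarint(msg, i)
			i = n
			fmt.Printf("field %d: bytes %q\n", fieldNum, msg[i:i+int(l)])
			i += int(l)
		default: // the real decoders route anything else through skipGenerated
			panic(fmt.Sprintf("unhandled wire type %d", wireType))
		}
	}
}

Running this prints field 1 as the CIDR string and field 2 as the varint 9, matching what ClusterNetworkEntry.MarshalToSizedBuffer emits. The generated.proto file that follows is the IDL these hand-rolled marshalers were generated from.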
+ +syntax = 'proto2'; + +package github.com.openshift.api.network.v1; + +import "k8s.io/apimachinery/pkg/apis/meta/v1/generated.proto"; +import "k8s.io/apimachinery/pkg/runtime/schema/generated.proto"; + +// Package-wide variables from generator "generated". +option go_package = "v1"; + +// ClusterNetwork describes the cluster network. There is normally only one object of this type, +// named "default", which is created by the SDN network plugin based on the master configuration +// when the cluster is brought up for the first time. +// +kubebuilder:resource:scope="Cluster" +// +kubebuilder:printcolumn:name="Cluster Network",type=string,JSONPath=`.network`,description="The primary cluster network CIDR" +// +kubebuilder:printcolumn:name="Service Network",type=string,JSONPath=`.serviceNetwork`,description="The service network CIDR" +// +kubebuilder:printcolumn:name="Plugin Name",type=string,JSONPath=`.pluginName`,description="The Openshift SDN network plug-in in use" +message ClusterNetwork { + optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; + + // Network is a CIDR string specifying the global overlay network's L3 space + // +kubebuilder:validation:Pattern=`^(([0-9]|[0-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5])\.){3}([0-9]|[0-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5])/([0-9]|[12][0-9]|3[0-2])$` + optional string network = 2; + + // HostSubnetLength is the number of bits of network to allocate to each node. eg, 8 would mean that each node would have a /24 slice of the overlay network for its pods + // +kubebuilder:validation:Minimum=2 + // +kubebuilder:validation:Maximum=30 + optional uint32 hostsubnetlength = 3; + + // ServiceNetwork is the CIDR range that Service IP addresses are allocated from + // +kubebuilder:validation:Pattern=`^(([0-9]|[0-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5])\.){3}([0-9]|[0-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5])/([0-9]|[12][0-9]|3[0-2])$` + optional string serviceNetwork = 4; + + // PluginName is the name of the network plugin being used + optional string pluginName = 5; + + // ClusterNetworks is a list of ClusterNetwork objects that defines the global overlay network's L3 space by specifying a set of CIDR and netmasks that the SDN can allocate addresses from. + repeated ClusterNetworkEntry clusterNetworks = 6; + + // VXLANPort sets the VXLAN destination port used by the cluster. + // It is set by the master configuration file on startup and cannot be edited manually. + // Valid values for VXLANPort are integers 1-65535 inclusive and if unset defaults to 4789. + // Changing VXLANPort allows users to resolve issues between openshift SDN and other software trying to use the same VXLAN destination port. + // +kubebuilder:validation:Minimum=1 + // +kubebuilder:validation:Maximum=65535 + // +kubebuilder:validation:Optional + // +optional + optional uint32 vxlanPort = 7; + + // MTU is the MTU for the overlay network. This should be 50 less than the MTU of the network connecting the nodes. It is normally autodetected by the cluster network operator. + // +kubebuilder:validation:Minimum=576 + // +kubebuilder:validation:Maximum=65536 + // +kubebuilder:validation:Optional + // +optional + optional uint32 mtu = 8; +} + +// ClusterNetworkEntry defines an individual cluster network. The CIDRs cannot overlap with other cluster network CIDRs, CIDRs reserved for external ips, CIDRs reserved for service networks, and CIDRs reserved for ingress ips. +message ClusterNetworkEntry { + // CIDR defines the total range of a cluster networks address space. 
+ // +kubebuilder:validation:Pattern=`^(([0-9]|[0-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5])\.){3}([0-9]|[0-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5])/([0-9]|[12][0-9]|3[0-2])$` + optional string cidr = 1; + + // HostSubnetLength is the number of bits of the accompanying CIDR address to allocate to each node. eg, 8 would mean that each node would have a /24 slice of the overlay network for its pods. + // +kubebuilder:validation:Minimum=2 + // +kubebuilder:validation:Maximum=30 + optional uint32 hostSubnetLength = 2; +} + +// ClusterNetworkList is a collection of ClusterNetworks +message ClusterNetworkList { + optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; + + // Items is the list of cluster networks + repeated ClusterNetwork items = 2; +} + +// EgressNetworkPolicy describes the current egress network policy for a Namespace. When using +// the 'redhat/openshift-ovs-multitenant' network plugin, traffic from a pod to an IP address +// outside the cluster will be checked against each EgressNetworkPolicyRule in the pod's +// namespace's EgressNetworkPolicy, in order. If no rule matches (or no EgressNetworkPolicy +// is present) then the traffic will be allowed by default. +message EgressNetworkPolicy { + optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; + + // spec is the specification of the current egress network policy + optional EgressNetworkPolicySpec spec = 2; +} + +// EgressNetworkPolicyList is a collection of EgressNetworkPolicy +message EgressNetworkPolicyList { + optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; + + // items is the list of policies + repeated EgressNetworkPolicy items = 2; +} + +// EgressNetworkPolicyPeer specifies a target to apply egress network policy to +message EgressNetworkPolicyPeer { + // CIDRSelector is the CIDR range to allow/deny traffic to. If this is set, dnsName must be unset + // Ideally we would have liked to use the cidr openapi format for this property. + // But openshift-sdn only supports v4 while specifying the cidr format allows both v4 and v6 cidrs + // We are therefore using a regex pattern to validate instead. + // +kubebuilder:validation:Pattern=`^(([0-9]|[0-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5])\.){3}([0-9]|[0-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5])/([0-9]|[12][0-9]|3[0-2])$` + optional string cidrSelector = 1; + + // DNSName is the domain name to allow/deny traffic to. If this is set, cidrSelector must be unset + // +kubebuilder:validation:Pattern=`^([A-Za-z0-9-]+\.)*[A-Za-z0-9-]+\.?$` + optional string dnsName = 2; +} + +// EgressNetworkPolicyRule contains a single egress network policy rule +message EgressNetworkPolicyRule { + // type marks this as an "Allow" or "Deny" rule + optional string type = 1; + + // to is the target that traffic is allowed/denied to + optional EgressNetworkPolicyPeer to = 2; +} + +// EgressNetworkPolicySpec provides a list of policies on outgoing network traffic +message EgressNetworkPolicySpec { + // egress contains the list of egress policy rules + repeated EgressNetworkPolicyRule egress = 1; +} + +// HostSubnet describes the container subnet network on a node. The HostSubnet object must have the +// same name as the Node object it corresponds to. 
+// +kubebuilder:printcolumn:name="Host",type=string,JSONPath=`.host`,description="The name of the node" +// +kubebuilder:printcolumn:name="Host IP",type=string,JSONPath=`.hostIP`,description="The IP address to be used as a VTEP by other nodes in the overlay network" +// +kubebuilder:printcolumn:name="Subnet",type=string,JSONPath=`.subnet`,description="The CIDR range of the overlay network assigned to the node for its pods" +// +kubebuilder:printcolumn:name="Egress CIDRs",type=string,JSONPath=`.egressCIDRs`,description="The network egress CIDRs" +// +kubebuilder:printcolumn:name="Egress IPs",type=string,JSONPath=`.egressIPs`,description="The network egress IP addresses" +message HostSubnet { + optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; + + // Host is the name of the node. (This is the same as the object's name, but both fields must be set.) + // +kubebuilder:validation:Pattern=`^[a-z0-9.-]+$` + optional string host = 2; + + // HostIP is the IP address to be used as a VTEP by other nodes in the overlay network + // +kubebuilder:validation:Pattern=`^(([0-9]|[0-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5])\.){3}([0-9]|[0-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5])$` + optional string hostIP = 3; + + // Subnet is the CIDR range of the overlay network assigned to the node for its pods + // +kubebuilder:validation:Pattern=`^(([0-9]|[0-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5])\.){3}([0-9]|[0-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5])/([0-9]|[12][0-9]|3[0-2])$` + optional string subnet = 4; + + // EgressIPs is the list of automatic egress IP addresses currently hosted by this node. + // If EgressCIDRs is empty, this can be set by hand; if EgressCIDRs is set then the + // master will overwrite the value here with its own allocation of egress IPs. + // +optional + repeated string egressIPs = 5; + + // EgressCIDRs is the list of CIDR ranges available for automatically assigning + // egress IPs to this node from. If this field is set then EgressIPs should be + // treated as read-only. + // +optional + repeated string egressCIDRs = 6; +} + +// HostSubnetList is a collection of HostSubnets +message HostSubnetList { + optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; + + // Items is the list of host subnets + repeated HostSubnet items = 2; +} + +// NetNamespace describes a single isolated network. When using the redhat/openshift-ovs-multitenant +// plugin, every Namespace will have a corresponding NetNamespace object with the same name. +// (When using redhat/openshift-ovs-subnet, NetNamespaces are not used.) +// +kubebuilder:printcolumn:name="NetID",type=integer,JSONPath=`.netid`,description="The network identifier of the network namespace" +// +kubebuilder:printcolumn:name="Egress IPs",type=string,JSONPath=`.egressIPs`,description="The network egress IP addresses" +message NetNamespace { + optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; + + // NetName is the name of the network namespace. (This is the same as the object's name, but both fields must be set.) + // +kubebuilder:validation:Pattern=`^[a-z0-9.-]+$` + optional string netname = 2; + + // NetID is the network identifier of the network namespace assigned to each overlay network packet. This can be manipulated with the "oc adm pod-network" commands. 
+ // +kubebuilder:validation:Minimum=0 + // +kubebuilder:validation:Maximum=16777215 + optional uint32 netid = 3; + + // EgressIPs is a list of reserved IPs that will be used as the source for external traffic coming from pods in this namespace. + // (If empty, external traffic will be masqueraded to Node IPs.) + // +optional + repeated string egressIPs = 4; +} + +// NetNamespaceList is a collection of NetNamespaces +message NetNamespaceList { + optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; + + // Items is the list of net namespaces + repeated NetNamespace items = 2; +} + diff --git a/vendor/github.com/openshift/api/network/v1/legacy.go b/vendor/github.com/openshift/api/network/v1/legacy.go new file mode 100644 index 0000000000000000000000000000000000000000..4395ebf8e56da1e2e9620afb38f3e840e3262e78 --- /dev/null +++ b/vendor/github.com/openshift/api/network/v1/legacy.go @@ -0,0 +1,27 @@ +package v1 + +import ( + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" +) + +var ( + legacyGroupVersion = schema.GroupVersion{Group: "", Version: "v1"} + legacySchemeBuilder = runtime.NewSchemeBuilder(addLegacyKnownTypes) + DeprecatedInstallWithoutGroup = legacySchemeBuilder.AddToScheme +) + +func addLegacyKnownTypes(scheme *runtime.Scheme) error { + types := []runtime.Object{ + &ClusterNetwork{}, + &ClusterNetworkList{}, + &HostSubnet{}, + &HostSubnetList{}, + &NetNamespace{}, + &NetNamespaceList{}, + &EgressNetworkPolicy{}, + &EgressNetworkPolicyList{}, + } + scheme.AddKnownTypes(legacyGroupVersion, types...) + return nil +} diff --git a/vendor/github.com/openshift/api/network/v1/register.go b/vendor/github.com/openshift/api/network/v1/register.go new file mode 100644 index 0000000000000000000000000000000000000000..80defa76427c77b2fccca9a932f75660f5bcc56e --- /dev/null +++ b/vendor/github.com/openshift/api/network/v1/register.go @@ -0,0 +1,44 @@ +package v1 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" +) + +var ( + GroupName = "network.openshift.io" + GroupVersion = schema.GroupVersion{Group: GroupName, Version: "v1"} + schemeBuilder = runtime.NewSchemeBuilder(addKnownTypes) + // Install is a function which adds this version to a scheme + Install = schemeBuilder.AddToScheme + + // SchemeGroupVersion generated code relies on this name + // Deprecated + SchemeGroupVersion = GroupVersion + // AddToScheme exists solely to keep the old generators creating valid code + // DEPRECATED + AddToScheme = schemeBuilder.AddToScheme +) + +// Resource generated code relies on this being here, but it logically belongs to the group +// DEPRECATED +func Resource(resource string) schema.GroupResource { + return schema.GroupResource{Group: GroupName, Resource: resource} +} + +// Adds the list of known types to api.Scheme. 
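+// A typical consumer calls the exported Install once at startup to register
+// every type above with its scheme; a minimal caller-side sketch (assuming the
+// caller owns a runtime.Scheme):
+//
+//	scheme := runtime.NewScheme()
+//	if err := Install(scheme); err != nil {
+//		panic(err) // illustrative; real callers handle the error
+//	}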
+func addKnownTypes(scheme *runtime.Scheme) error { + scheme.AddKnownTypes(GroupVersion, + &ClusterNetwork{}, + &ClusterNetworkList{}, + &HostSubnet{}, + &HostSubnetList{}, + &NetNamespace{}, + &NetNamespaceList{}, + &EgressNetworkPolicy{}, + &EgressNetworkPolicyList{}, + ) + metav1.AddToGroupVersion(scheme, GroupVersion) + return nil +} diff --git a/vendor/github.com/openshift/api/network/v1/types.go b/vendor/github.com/openshift/api/network/v1/types.go new file mode 100644 index 0000000000000000000000000000000000000000..c0ee55126f51d631582e3d1d8bf7123902c728dc --- /dev/null +++ b/vendor/github.com/openshift/api/network/v1/types.go @@ -0,0 +1,252 @@ +package v1 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +const ( + ClusterNetworkDefault = "default" +) + +// +genclient +// +genclient:nonNamespaced +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// ClusterNetwork describes the cluster network. There is normally only one object of this type, +// named "default", which is created by the SDN network plugin based on the master configuration +// when the cluster is brought up for the first time. +// +kubebuilder:resource:scope="Cluster" +// +kubebuilder:printcolumn:name="Cluster Network",type=string,JSONPath=`.network`,description="The primary cluster network CIDR" +// +kubebuilder:printcolumn:name="Service Network",type=string,JSONPath=`.serviceNetwork`,description="The service network CIDR" +// +kubebuilder:printcolumn:name="Plugin Name",type=string,JSONPath=`.pluginName`,description="The Openshift SDN network plug-in in use" +type ClusterNetwork struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` + + // Network is a CIDR string specifying the global overlay network's L3 space + // +kubebuilder:validation:Pattern=`^(([0-9]|[0-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5])\.){3}([0-9]|[0-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5])/([0-9]|[12][0-9]|3[0-2])$` + Network string `json:"network,omitempty" protobuf:"bytes,2,opt,name=network"` + + // HostSubnetLength is the number of bits of network to allocate to each node. eg, 8 would mean that each node would have a /24 slice of the overlay network for its pods + // +kubebuilder:validation:Minimum=2 + // +kubebuilder:validation:Maximum=30 + HostSubnetLength uint32 `json:"hostsubnetlength,omitempty" protobuf:"varint,3,opt,name=hostsubnetlength"` + + // ServiceNetwork is the CIDR range that Service IP addresses are allocated from + // +kubebuilder:validation:Pattern=`^(([0-9]|[0-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5])\.){3}([0-9]|[0-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5])/([0-9]|[12][0-9]|3[0-2])$` + ServiceNetwork string `json:"serviceNetwork" protobuf:"bytes,4,opt,name=serviceNetwork"` + + // PluginName is the name of the network plugin being used + PluginName string `json:"pluginName,omitempty" protobuf:"bytes,5,opt,name=pluginName"` + + // ClusterNetworks is a list of ClusterNetwork objects that defines the global overlay network's L3 space by specifying a set of CIDR and netmasks that the SDN can allocate addresses from. + ClusterNetworks []ClusterNetworkEntry `json:"clusterNetworks" protobuf:"bytes,6,rep,name=clusterNetworks"` + + // VXLANPort sets the VXLAN destination port used by the cluster. + // It is set by the master configuration file on startup and cannot be edited manually. + // Valid values for VXLANPort are integers 1-65535 inclusive and if unset defaults to 4789. 
+ // Changing VXLANPort allows users to resolve issues between openshift SDN and other software trying to use the same VXLAN destination port. + // +kubebuilder:validation:Minimum=1 + // +kubebuilder:validation:Maximum=65535 + // +kubebuilder:validation:Optional + // +optional + VXLANPort *uint32 `json:"vxlanPort,omitempty" protobuf:"varint,7,opt,name=vxlanPort"` + + // MTU is the MTU for the overlay network. This should be 50 less than the MTU of the network connecting the nodes. It is normally autodetected by the cluster network operator. + // +kubebuilder:validation:Minimum=576 + // +kubebuilder:validation:Maximum=65536 + // +kubebuilder:validation:Optional + // +optional + MTU *uint32 `json:"mtu,omitempty" protobuf:"varint,8,opt,name=mtu"` +} + +// ClusterNetworkEntry defines an individual cluster network. The CIDRs cannot overlap with other cluster network CIDRs, CIDRs reserved for external ips, CIDRs reserved for service networks, and CIDRs reserved for ingress ips. +type ClusterNetworkEntry struct { + // CIDR defines the total range of a cluster networks address space. + // +kubebuilder:validation:Pattern=`^(([0-9]|[0-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5])\.){3}([0-9]|[0-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5])/([0-9]|[12][0-9]|3[0-2])$` + CIDR string `json:"CIDR" protobuf:"bytes,1,opt,name=cidr"` + + // HostSubnetLength is the number of bits of the accompanying CIDR address to allocate to each node. eg, 8 would mean that each node would have a /24 slice of the overlay network for its pods. + // +kubebuilder:validation:Minimum=2 + // +kubebuilder:validation:Maximum=30 + HostSubnetLength uint32 `json:"hostSubnetLength" protobuf:"varint,2,opt,name=hostSubnetLength"` +} + +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// ClusterNetworkList is a collection of ClusterNetworks +type ClusterNetworkList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` + + // Items is the list of cluster networks + Items []ClusterNetwork `json:"items" protobuf:"bytes,2,rep,name=items"` +} + +// HostSubnetEgressIP represents one egress IP address currently hosted on the node represented by +// HostSubnet +// +kubebuilder:validation:Pattern=`^(([0-9]|[0-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5])\.){3}([0-9]|[0-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5])$` +type HostSubnetEgressIP string + +// HostSubnetEgressCIDR represents one egress CIDR from which to assign IP addresses for this node +// represented by the HostSubnet +// +kubebuilder:validation:Pattern=`^(([0-9]|[0-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5])\.){3}([0-9]|[0-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5])/([0-9]|[12][0-9]|3[0-2])$` +type HostSubnetEgressCIDR string + +// +genclient +// +genclient:nonNamespaced +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// HostSubnet describes the container subnet network on a node. The HostSubnet object must have the +// same name as the Node object it corresponds to. 
+// +kubebuilder:printcolumn:name="Host",type=string,JSONPath=`.host`,description="The name of the node" +// +kubebuilder:printcolumn:name="Host IP",type=string,JSONPath=`.hostIP`,description="The IP address to be used as a VTEP by other nodes in the overlay network" +// +kubebuilder:printcolumn:name="Subnet",type=string,JSONPath=`.subnet`,description="The CIDR range of the overlay network assigned to the node for its pods" +// +kubebuilder:printcolumn:name="Egress CIDRs",type=string,JSONPath=`.egressCIDRs`,description="The network egress CIDRs" +// +kubebuilder:printcolumn:name="Egress IPs",type=string,JSONPath=`.egressIPs`,description="The network egress IP addresses" +type HostSubnet struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` + + // Host is the name of the node. (This is the same as the object's name, but both fields must be set.) + // +kubebuilder:validation:Pattern=`^[a-z0-9.-]+$` + Host string `json:"host" protobuf:"bytes,2,opt,name=host"` + + // HostIP is the IP address to be used as a VTEP by other nodes in the overlay network + // +kubebuilder:validation:Pattern=`^(([0-9]|[0-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5])\.){3}([0-9]|[0-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5])$` + HostIP string `json:"hostIP" protobuf:"bytes,3,opt,name=hostIP"` + + // Subnet is the CIDR range of the overlay network assigned to the node for its pods + // +kubebuilder:validation:Pattern=`^(([0-9]|[0-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5])\.){3}([0-9]|[0-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5])/([0-9]|[12][0-9]|3[0-2])$` + Subnet string `json:"subnet" protobuf:"bytes,4,opt,name=subnet"` + + // EgressIPs is the list of automatic egress IP addresses currently hosted by this node. + // If EgressCIDRs is empty, this can be set by hand; if EgressCIDRs is set then the + // master will overwrite the value here with its own allocation of egress IPs. + // +optional + EgressIPs []HostSubnetEgressIP `json:"egressIPs,omitempty" protobuf:"bytes,5,rep,name=egressIPs"` + + // EgressCIDRs is the list of CIDR ranges available for automatically assigning + // egress IPs to this node from. If this field is set then EgressIPs should be + // treated as read-only. + // +optional + EgressCIDRs []HostSubnetEgressCIDR `json:"egressCIDRs,omitempty" protobuf:"bytes,6,rep,name=egressCIDRs"` +} + +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// HostSubnetList is a collection of HostSubnets +type HostSubnetList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` + + // Items is the list of host subnets + Items []HostSubnet `json:"items" protobuf:"bytes,2,rep,name=items"` +} + +// NetNamespaceEgressIP is a single egress IP out of a list of reserved IPs used as source of external traffic coming +// from pods in this namespace +// +kubebuilder:validation:Pattern=`^(([0-9]|[0-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5])\.){3}([0-9]|[0-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5])$` +type NetNamespaceEgressIP string + +// +genclient +// +genclient:nonNamespaced +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// NetNamespace describes a single isolated network. When using the redhat/openshift-ovs-multitenant +// plugin, every Namespace will have a corresponding NetNamespace object with the same name. +// (When using redhat/openshift-ovs-subnet, NetNamespaces are not used.) 
+// +kubebuilder:printcolumn:name="NetID",type=integer,JSONPath=`.netid`,description="The network identifier of the network namespace" +// +kubebuilder:printcolumn:name="Egress IPs",type=string,JSONPath=`.egressIPs`,description="The network egress IP addresses" +type NetNamespace struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` + + // NetName is the name of the network namespace. (This is the same as the object's name, but both fields must be set.) + // +kubebuilder:validation:Pattern=`^[a-z0-9.-]+$` + NetName string `json:"netname" protobuf:"bytes,2,opt,name=netname"` + + // NetID is the network identifier of the network namespace assigned to each overlay network packet. This can be manipulated with the "oc adm pod-network" commands. + // +kubebuilder:validation:Minimum=0 + // +kubebuilder:validation:Maximum=16777215 + NetID uint32 `json:"netid" protobuf:"varint,3,opt,name=netid"` + + // EgressIPs is a list of reserved IPs that will be used as the source for external traffic coming from pods in this namespace. + // (If empty, external traffic will be masqueraded to Node IPs.) + // +optional + EgressIPs []NetNamespaceEgressIP `json:"egressIPs,omitempty" protobuf:"bytes,4,rep,name=egressIPs"` +} + +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// NetNamespaceList is a collection of NetNamespaces +type NetNamespaceList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` + + // Items is the list of net namespaces + Items []NetNamespace `json:"items" protobuf:"bytes,2,rep,name=items"` +} + +// EgressNetworkPolicyRuleType indicates whether an EgressNetworkPolicyRule allows or denies traffic +// +kubebuilder:validation:Pattern=`^Allow|Deny$` +type EgressNetworkPolicyRuleType string + +const ( + EgressNetworkPolicyRuleAllow EgressNetworkPolicyRuleType = "Allow" + EgressNetworkPolicyRuleDeny EgressNetworkPolicyRuleType = "Deny" +) + +// EgressNetworkPolicyPeer specifies a target to apply egress network policy to +type EgressNetworkPolicyPeer struct { + // CIDRSelector is the CIDR range to allow/deny traffic to. If this is set, dnsName must be unset + // Ideally we would have liked to use the cidr openapi format for this property. + // But openshift-sdn only supports v4 while specifying the cidr format allows both v4 and v6 cidrs + // We are therefore using a regex pattern to validate instead. + // +kubebuilder:validation:Pattern=`^(([0-9]|[0-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5])\.){3}([0-9]|[0-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5])/([0-9]|[12][0-9]|3[0-2])$` + CIDRSelector string `json:"cidrSelector,omitempty" protobuf:"bytes,1,rep,name=cidrSelector"` + // DNSName is the domain name to allow/deny traffic to. 
If this is set, cidrSelector must be unset + // +kubebuilder:validation:Pattern=`^([A-Za-z0-9-]+\.)*[A-Za-z0-9-]+\.?$` + DNSName string `json:"dnsName,omitempty" protobuf:"bytes,2,rep,name=dnsName"` +} + +// EgressNetworkPolicyRule contains a single egress network policy rule +type EgressNetworkPolicyRule struct { + // type marks this as an "Allow" or "Deny" rule + Type EgressNetworkPolicyRuleType `json:"type" protobuf:"bytes,1,rep,name=type"` + // to is the target that traffic is allowed/denied to + To EgressNetworkPolicyPeer `json:"to" protobuf:"bytes,2,rep,name=to"` +} + +// EgressNetworkPolicySpec provides a list of policies on outgoing network traffic +type EgressNetworkPolicySpec struct { + // egress contains the list of egress policy rules + Egress []EgressNetworkPolicyRule `json:"egress" protobuf:"bytes,1,rep,name=egress"` +} + +// +genclient +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// EgressNetworkPolicy describes the current egress network policy for a Namespace. When using +// the 'redhat/openshift-ovs-multitenant' network plugin, traffic from a pod to an IP address +// outside the cluster will be checked against each EgressNetworkPolicyRule in the pod's +// namespace's EgressNetworkPolicy, in order. If no rule matches (or no EgressNetworkPolicy +// is present) then the traffic will be allowed by default. +type EgressNetworkPolicy struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` + + // spec is the specification of the current egress network policy + Spec EgressNetworkPolicySpec `json:"spec" protobuf:"bytes,2,opt,name=spec"` +} + +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// EgressNetworkPolicyList is a collection of EgressNetworkPolicy +type EgressNetworkPolicyList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` + + // items is the list of policies + Items []EgressNetworkPolicy `json:"items" protobuf:"bytes,2,rep,name=items"` +} diff --git a/vendor/github.com/openshift/api/network/v1/zz_generated.deepcopy.go b/vendor/github.com/openshift/api/network/v1/zz_generated.deepcopy.go new file mode 100644 index 0000000000000000000000000000000000000000..32f766e01eb5f7099f66a9e65042715252d903a6 --- /dev/null +++ b/vendor/github.com/openshift/api/network/v1/zz_generated.deepcopy.go @@ -0,0 +1,346 @@ +// +build !ignore_autogenerated + +// Code generated by deepcopy-gen. DO NOT EDIT. + +package v1 + +import ( + runtime "k8s.io/apimachinery/pkg/runtime" +) + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ClusterNetwork) DeepCopyInto(out *ClusterNetwork) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + if in.ClusterNetworks != nil { + in, out := &in.ClusterNetworks, &out.ClusterNetworks + *out = make([]ClusterNetworkEntry, len(*in)) + copy(*out, *in) + } + if in.VXLANPort != nil { + in, out := &in.VXLANPort, &out.VXLANPort + *out = new(uint32) + **out = **in + } + if in.MTU != nil { + in, out := &in.MTU, &out.MTU + *out = new(uint32) + **out = **in + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClusterNetwork. 
+func (in *ClusterNetwork) DeepCopy() *ClusterNetwork { + if in == nil { + return nil + } + out := new(ClusterNetwork) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *ClusterNetwork) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ClusterNetworkEntry) DeepCopyInto(out *ClusterNetworkEntry) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClusterNetworkEntry. +func (in *ClusterNetworkEntry) DeepCopy() *ClusterNetworkEntry { + if in == nil { + return nil + } + out := new(ClusterNetworkEntry) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ClusterNetworkList) DeepCopyInto(out *ClusterNetworkList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]ClusterNetwork, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClusterNetworkList. +func (in *ClusterNetworkList) DeepCopy() *ClusterNetworkList { + if in == nil { + return nil + } + out := new(ClusterNetworkList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *ClusterNetworkList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *EgressNetworkPolicy) DeepCopyInto(out *EgressNetworkPolicy) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new EgressNetworkPolicy. +func (in *EgressNetworkPolicy) DeepCopy() *EgressNetworkPolicy { + if in == nil { + return nil + } + out := new(EgressNetworkPolicy) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *EgressNetworkPolicy) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *EgressNetworkPolicyList) DeepCopyInto(out *EgressNetworkPolicyList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]EgressNetworkPolicy, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new EgressNetworkPolicyList. 
+func (in *EgressNetworkPolicyList) DeepCopy() *EgressNetworkPolicyList { + if in == nil { + return nil + } + out := new(EgressNetworkPolicyList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *EgressNetworkPolicyList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *EgressNetworkPolicyPeer) DeepCopyInto(out *EgressNetworkPolicyPeer) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new EgressNetworkPolicyPeer. +func (in *EgressNetworkPolicyPeer) DeepCopy() *EgressNetworkPolicyPeer { + if in == nil { + return nil + } + out := new(EgressNetworkPolicyPeer) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *EgressNetworkPolicyRule) DeepCopyInto(out *EgressNetworkPolicyRule) { + *out = *in + out.To = in.To + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new EgressNetworkPolicyRule. +func (in *EgressNetworkPolicyRule) DeepCopy() *EgressNetworkPolicyRule { + if in == nil { + return nil + } + out := new(EgressNetworkPolicyRule) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *EgressNetworkPolicySpec) DeepCopyInto(out *EgressNetworkPolicySpec) { + *out = *in + if in.Egress != nil { + in, out := &in.Egress, &out.Egress + *out = make([]EgressNetworkPolicyRule, len(*in)) + copy(*out, *in) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new EgressNetworkPolicySpec. +func (in *EgressNetworkPolicySpec) DeepCopy() *EgressNetworkPolicySpec { + if in == nil { + return nil + } + out := new(EgressNetworkPolicySpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *HostSubnet) DeepCopyInto(out *HostSubnet) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + if in.EgressIPs != nil { + in, out := &in.EgressIPs, &out.EgressIPs + *out = make([]HostSubnetEgressIP, len(*in)) + copy(*out, *in) + } + if in.EgressCIDRs != nil { + in, out := &in.EgressCIDRs, &out.EgressCIDRs + *out = make([]HostSubnetEgressCIDR, len(*in)) + copy(*out, *in) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HostSubnet. +func (in *HostSubnet) DeepCopy() *HostSubnet { + if in == nil { + return nil + } + out := new(HostSubnet) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *HostSubnet) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
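+// (Two copy strategies recur in this file: slices whose elements need no deep
+// copying, e.g. []HostSubnetEgressIP or []EgressNetworkPolicyRule, use the
+// built-in copy, while slices of elements that do, such as the Items fields
+// of the List types, are copied element-by-element through DeepCopyInto.)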
+func (in *HostSubnetList) DeepCopyInto(out *HostSubnetList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]HostSubnet, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HostSubnetList. +func (in *HostSubnetList) DeepCopy() *HostSubnetList { + if in == nil { + return nil + } + out := new(HostSubnetList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *HostSubnetList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *NetNamespace) DeepCopyInto(out *NetNamespace) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + if in.EgressIPs != nil { + in, out := &in.EgressIPs, &out.EgressIPs + *out = make([]NetNamespaceEgressIP, len(*in)) + copy(*out, *in) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NetNamespace. +func (in *NetNamespace) DeepCopy() *NetNamespace { + if in == nil { + return nil + } + out := new(NetNamespace) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *NetNamespace) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *NetNamespaceList) DeepCopyInto(out *NetNamespaceList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]NetNamespace, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NetNamespaceList. +func (in *NetNamespaceList) DeepCopy() *NetNamespaceList { + if in == nil { + return nil + } + out := new(NetNamespaceList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *NetNamespaceList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} diff --git a/vendor/github.com/openshift/api/network/v1/zz_generated.swagger_doc_generated.go b/vendor/github.com/openshift/api/network/v1/zz_generated.swagger_doc_generated.go new file mode 100644 index 0000000000000000000000000000000000000000..9a3d2ffdf84bcb3db25ebc71bfd91f0bc33a5b74 --- /dev/null +++ b/vendor/github.com/openshift/api/network/v1/zz_generated.swagger_doc_generated.go @@ -0,0 +1,137 @@ +package v1 + +// This file contains a collection of methods that can be used from go-restful to +// generate Swagger API documentation for its models. Please read this PR for more +// information on the implementation: https://github.com/emicklei/go-restful/pull/215 +// +// TODOs are ignored from the parser (e.g. TODO(andronat):... || TODO:...) if and only if +// they are on one line! 
For multiple line or blocks that you want to ignore use ---. +// Any context after a --- is ignored. +// +// Those methods can be generated by using hack/update-swagger-docs.sh + +// AUTO-GENERATED FUNCTIONS START HERE +var map_ClusterNetwork = map[string]string{ + "": "ClusterNetwork describes the cluster network. There is normally only one object of this type, named \"default\", which is created by the SDN network plugin based on the master configuration when the cluster is brought up for the first time.", + "network": "Network is a CIDR string specifying the global overlay network's L3 space", + "hostsubnetlength": "HostSubnetLength is the number of bits of network to allocate to each node. eg, 8 would mean that each node would have a /24 slice of the overlay network for its pods", + "serviceNetwork": "ServiceNetwork is the CIDR range that Service IP addresses are allocated from", + "pluginName": "PluginName is the name of the network plugin being used", + "clusterNetworks": "ClusterNetworks is a list of ClusterNetwork objects that defines the global overlay network's L3 space by specifying a set of CIDR and netmasks that the SDN can allocate addresses from.", + "vxlanPort": "VXLANPort sets the VXLAN destination port used by the cluster. It is set by the master configuration file on startup and cannot be edited manually. Valid values for VXLANPort are integers 1-65535 inclusive and if unset defaults to 4789. Changing VXLANPort allows users to resolve issues between openshift SDN and other software trying to use the same VXLAN destination port.", + "mtu": "MTU is the MTU for the overlay network. This should be 50 less than the MTU of the network connecting the nodes. It is normally autodetected by the cluster network operator.", +} + +func (ClusterNetwork) SwaggerDoc() map[string]string { + return map_ClusterNetwork +} + +var map_ClusterNetworkEntry = map[string]string{ + "": "ClusterNetworkEntry defines an individual cluster network. The CIDRs cannot overlap with other cluster network CIDRs, CIDRs reserved for external ips, CIDRs reserved for service networks, and CIDRs reserved for ingress ips.", + "CIDR": "CIDR defines the total range of a cluster networks address space.", + "hostSubnetLength": "HostSubnetLength is the number of bits of the accompanying CIDR address to allocate to each node. eg, 8 would mean that each node would have a /24 slice of the overlay network for its pods.", +} + +func (ClusterNetworkEntry) SwaggerDoc() map[string]string { + return map_ClusterNetworkEntry +} + +var map_ClusterNetworkList = map[string]string{ + "": "ClusterNetworkList is a collection of ClusterNetworks", + "items": "Items is the list of cluster networks", +} + +func (ClusterNetworkList) SwaggerDoc() map[string]string { + return map_ClusterNetworkList +} + +var map_EgressNetworkPolicy = map[string]string{ + "": "EgressNetworkPolicy describes the current egress network policy for a Namespace. When using the 'redhat/openshift-ovs-multitenant' network plugin, traffic from a pod to an IP address outside the cluster will be checked against each EgressNetworkPolicyRule in the pod's namespace's EgressNetworkPolicy, in order. 
If no rule matches (or no EgressNetworkPolicy is present) then the traffic will be allowed by default.", + "spec": "spec is the specification of the current egress network policy", +} + +func (EgressNetworkPolicy) SwaggerDoc() map[string]string { + return map_EgressNetworkPolicy +} + +var map_EgressNetworkPolicyList = map[string]string{ + "": "EgressNetworkPolicyList is a collection of EgressNetworkPolicy", + "items": "items is the list of policies", +} + +func (EgressNetworkPolicyList) SwaggerDoc() map[string]string { + return map_EgressNetworkPolicyList +} + +var map_EgressNetworkPolicyPeer = map[string]string{ + "": "EgressNetworkPolicyPeer specifies a target to apply egress network policy to", + "cidrSelector": "CIDRSelector is the CIDR range to allow/deny traffic to. If this is set, dnsName must be unset Ideally we would have liked to use the cidr openapi format for this property. But openshift-sdn only supports v4 while specifying the cidr format allows both v4 and v6 cidrs We are therefore using a regex pattern to validate instead.", + "dnsName": "DNSName is the domain name to allow/deny traffic to. If this is set, cidrSelector must be unset", +} + +func (EgressNetworkPolicyPeer) SwaggerDoc() map[string]string { + return map_EgressNetworkPolicyPeer +} + +var map_EgressNetworkPolicyRule = map[string]string{ + "": "EgressNetworkPolicyRule contains a single egress network policy rule", + "type": "type marks this as an \"Allow\" or \"Deny\" rule", + "to": "to is the target that traffic is allowed/denied to", +} + +func (EgressNetworkPolicyRule) SwaggerDoc() map[string]string { + return map_EgressNetworkPolicyRule +} + +var map_EgressNetworkPolicySpec = map[string]string{ + "": "EgressNetworkPolicySpec provides a list of policies on outgoing network traffic", + "egress": "egress contains the list of egress policy rules", +} + +func (EgressNetworkPolicySpec) SwaggerDoc() map[string]string { + return map_EgressNetworkPolicySpec +} + +var map_HostSubnet = map[string]string{ + "": "HostSubnet describes the container subnet network on a node. The HostSubnet object must have the same name as the Node object it corresponds to.", + "host": "Host is the name of the node. (This is the same as the object's name, but both fields must be set.)", + "hostIP": "HostIP is the IP address to be used as a VTEP by other nodes in the overlay network", + "subnet": "Subnet is the CIDR range of the overlay network assigned to the node for its pods", + "egressIPs": "EgressIPs is the list of automatic egress IP addresses currently hosted by this node. If EgressCIDRs is empty, this can be set by hand; if EgressCIDRs is set then the master will overwrite the value here with its own allocation of egress IPs.", + "egressCIDRs": "EgressCIDRs is the list of CIDR ranges available for automatically assigning egress IPs to this node from. If this field is set then EgressIPs should be treated as read-only.", +} + +func (HostSubnet) SwaggerDoc() map[string]string { + return map_HostSubnet +} + +var map_HostSubnetList = map[string]string{ + "": "HostSubnetList is a collection of HostSubnets", + "items": "Items is the list of host subnets", +} + +func (HostSubnetList) SwaggerDoc() map[string]string { + return map_HostSubnetList +} + +var map_NetNamespace = map[string]string{ + "": "NetNamespace describes a single isolated network. When using the redhat/openshift-ovs-multitenant plugin, every Namespace will have a corresponding NetNamespace object with the same name. 
(When using redhat/openshift-ovs-subnet, NetNamespaces are not used.)", + "netname": "NetName is the name of the network namespace. (This is the same as the object's name, but both fields must be set.)", + "netid": "NetID is the network identifier of the network namespace assigned to each overlay network packet. This can be manipulated with the \"oc adm pod-network\" commands.", + "egressIPs": "EgressIPs is a list of reserved IPs that will be used as the source for external traffic coming from pods in this namespace. (If empty, external traffic will be masqueraded to Node IPs.)", +} + +func (NetNamespace) SwaggerDoc() map[string]string { + return map_NetNamespace +} + +var map_NetNamespaceList = map[string]string{ + "": "NetNamespaceList is a collection of NetNamespaces", + "items": "Items is the list of net namespaces", +} + +func (NetNamespaceList) SwaggerDoc() map[string]string { + return map_NetNamespaceList +} + +// AUTO-GENERATED FUNCTIONS END HERE diff --git a/vendor/github.com/openshift/api/oauth/install.go b/vendor/github.com/openshift/api/oauth/install.go new file mode 100644 index 0000000000000000000000000000000000000000..6bf63539d9f76c65736215bc9a215f42e22ac1cd --- /dev/null +++ b/vendor/github.com/openshift/api/oauth/install.go @@ -0,0 +1,26 @@ +package oauth + +import ( + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" + + oauthv1 "github.com/openshift/api/oauth/v1" +) + +const ( + GroupName = "oauth.openshift.io" +) + +var ( + schemeBuilder = runtime.NewSchemeBuilder(oauthv1.Install) + // Install is a function which adds every version of this group to a scheme + Install = schemeBuilder.AddToScheme +) + +func Resource(resource string) schema.GroupResource { + return schema.GroupResource{Group: GroupName, Resource: resource} +} + +func Kind(kind string) schema.GroupKind { + return schema.GroupKind{Group: GroupName, Kind: kind} +} diff --git a/vendor/github.com/openshift/api/oauth/v1/doc.go b/vendor/github.com/openshift/api/oauth/v1/doc.go new file mode 100644 index 0000000000000000000000000000000000000000..cae9e70d4a53653a21eb47c98985227e9197b6e9 --- /dev/null +++ b/vendor/github.com/openshift/api/oauth/v1/doc.go @@ -0,0 +1,8 @@ +// +k8s:deepcopy-gen=package,register +// +k8s:conversion-gen=github.com/openshift/origin/pkg/oauth/apis/oauth +// +k8s:defaulter-gen=TypeMeta +// +k8s:openapi-gen=true + +// +groupName=oauth.openshift.io +// Package v1 is the v1 version of the API. +package v1 diff --git a/vendor/github.com/openshift/api/oauth/v1/generated.pb.go b/vendor/github.com/openshift/api/oauth/v1/generated.pb.go new file mode 100644 index 0000000000000000000000000000000000000000..9ff61e190d71a7eb9b4ffe42b59768bf7d9f0242 --- /dev/null +++ b/vendor/github.com/openshift/api/oauth/v1/generated.pb.go @@ -0,0 +1,3955 @@ +// Code generated by protoc-gen-gogo. DO NOT EDIT. +// source: github.com/openshift/api/oauth/v1/generated.proto + +package v1 + +import ( + fmt "fmt" + + io "io" + + proto "github.com/gogo/protobuf/proto" + + math "math" + math_bits "math/bits" + reflect "reflect" + strings "strings" +) + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. 
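+// (The blank const below is that assertion: it compiles only while the vendored
+// gogo/protobuf runtime still exports GoGoProtoPackageIsVersion2, so a version
+// mismatch surfaces as a build error rather than a runtime failure.)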
+const _ = proto.GoGoProtoPackageIsVersion2 // please upgrade the proto package + +func (m *ClusterRoleScopeRestriction) Reset() { *m = ClusterRoleScopeRestriction{} } +func (*ClusterRoleScopeRestriction) ProtoMessage() {} +func (*ClusterRoleScopeRestriction) Descriptor() ([]byte, []int) { + return fileDescriptor_bd688dca7ea39c8a, []int{0} +} +func (m *ClusterRoleScopeRestriction) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ClusterRoleScopeRestriction) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *ClusterRoleScopeRestriction) XXX_Merge(src proto.Message) { + xxx_messageInfo_ClusterRoleScopeRestriction.Merge(m, src) +} +func (m *ClusterRoleScopeRestriction) XXX_Size() int { + return m.Size() +} +func (m *ClusterRoleScopeRestriction) XXX_DiscardUnknown() { + xxx_messageInfo_ClusterRoleScopeRestriction.DiscardUnknown(m) +} + +var xxx_messageInfo_ClusterRoleScopeRestriction proto.InternalMessageInfo + +func (m *OAuthAccessToken) Reset() { *m = OAuthAccessToken{} } +func (*OAuthAccessToken) ProtoMessage() {} +func (*OAuthAccessToken) Descriptor() ([]byte, []int) { + return fileDescriptor_bd688dca7ea39c8a, []int{1} +} +func (m *OAuthAccessToken) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *OAuthAccessToken) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *OAuthAccessToken) XXX_Merge(src proto.Message) { + xxx_messageInfo_OAuthAccessToken.Merge(m, src) +} +func (m *OAuthAccessToken) XXX_Size() int { + return m.Size() +} +func (m *OAuthAccessToken) XXX_DiscardUnknown() { + xxx_messageInfo_OAuthAccessToken.DiscardUnknown(m) +} + +var xxx_messageInfo_OAuthAccessToken proto.InternalMessageInfo + +func (m *OAuthAccessTokenList) Reset() { *m = OAuthAccessTokenList{} } +func (*OAuthAccessTokenList) ProtoMessage() {} +func (*OAuthAccessTokenList) Descriptor() ([]byte, []int) { + return fileDescriptor_bd688dca7ea39c8a, []int{2} +} +func (m *OAuthAccessTokenList) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *OAuthAccessTokenList) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *OAuthAccessTokenList) XXX_Merge(src proto.Message) { + xxx_messageInfo_OAuthAccessTokenList.Merge(m, src) +} +func (m *OAuthAccessTokenList) XXX_Size() int { + return m.Size() +} +func (m *OAuthAccessTokenList) XXX_DiscardUnknown() { + xxx_messageInfo_OAuthAccessTokenList.DiscardUnknown(m) +} + +var xxx_messageInfo_OAuthAccessTokenList proto.InternalMessageInfo + +func (m *OAuthAuthorizeToken) Reset() { *m = OAuthAuthorizeToken{} } +func (*OAuthAuthorizeToken) ProtoMessage() {} +func (*OAuthAuthorizeToken) Descriptor() ([]byte, []int) { + return fileDescriptor_bd688dca7ea39c8a, []int{3} +} +func (m *OAuthAuthorizeToken) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *OAuthAuthorizeToken) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *OAuthAuthorizeToken) XXX_Merge(src proto.Message) { + xxx_messageInfo_OAuthAuthorizeToken.Merge(m, src) +} +func (m *OAuthAuthorizeToken) XXX_Size() int { + 
return m.Size() +} +func (m *OAuthAuthorizeToken) XXX_DiscardUnknown() { + xxx_messageInfo_OAuthAuthorizeToken.DiscardUnknown(m) +} + +var xxx_messageInfo_OAuthAuthorizeToken proto.InternalMessageInfo + +func (m *OAuthAuthorizeTokenList) Reset() { *m = OAuthAuthorizeTokenList{} } +func (*OAuthAuthorizeTokenList) ProtoMessage() {} +func (*OAuthAuthorizeTokenList) Descriptor() ([]byte, []int) { + return fileDescriptor_bd688dca7ea39c8a, []int{4} +} +func (m *OAuthAuthorizeTokenList) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *OAuthAuthorizeTokenList) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *OAuthAuthorizeTokenList) XXX_Merge(src proto.Message) { + xxx_messageInfo_OAuthAuthorizeTokenList.Merge(m, src) +} +func (m *OAuthAuthorizeTokenList) XXX_Size() int { + return m.Size() +} +func (m *OAuthAuthorizeTokenList) XXX_DiscardUnknown() { + xxx_messageInfo_OAuthAuthorizeTokenList.DiscardUnknown(m) +} + +var xxx_messageInfo_OAuthAuthorizeTokenList proto.InternalMessageInfo + +func (m *OAuthClient) Reset() { *m = OAuthClient{} } +func (*OAuthClient) ProtoMessage() {} +func (*OAuthClient) Descriptor() ([]byte, []int) { + return fileDescriptor_bd688dca7ea39c8a, []int{5} +} +func (m *OAuthClient) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *OAuthClient) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *OAuthClient) XXX_Merge(src proto.Message) { + xxx_messageInfo_OAuthClient.Merge(m, src) +} +func (m *OAuthClient) XXX_Size() int { + return m.Size() +} +func (m *OAuthClient) XXX_DiscardUnknown() { + xxx_messageInfo_OAuthClient.DiscardUnknown(m) +} + +var xxx_messageInfo_OAuthClient proto.InternalMessageInfo + +func (m *OAuthClientAuthorization) Reset() { *m = OAuthClientAuthorization{} } +func (*OAuthClientAuthorization) ProtoMessage() {} +func (*OAuthClientAuthorization) Descriptor() ([]byte, []int) { + return fileDescriptor_bd688dca7ea39c8a, []int{6} +} +func (m *OAuthClientAuthorization) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *OAuthClientAuthorization) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *OAuthClientAuthorization) XXX_Merge(src proto.Message) { + xxx_messageInfo_OAuthClientAuthorization.Merge(m, src) +} +func (m *OAuthClientAuthorization) XXX_Size() int { + return m.Size() +} +func (m *OAuthClientAuthorization) XXX_DiscardUnknown() { + xxx_messageInfo_OAuthClientAuthorization.DiscardUnknown(m) +} + +var xxx_messageInfo_OAuthClientAuthorization proto.InternalMessageInfo + +func (m *OAuthClientAuthorizationList) Reset() { *m = OAuthClientAuthorizationList{} } +func (*OAuthClientAuthorizationList) ProtoMessage() {} +func (*OAuthClientAuthorizationList) Descriptor() ([]byte, []int) { + return fileDescriptor_bd688dca7ea39c8a, []int{7} +} +func (m *OAuthClientAuthorizationList) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *OAuthClientAuthorizationList) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *OAuthClientAuthorizationList) XXX_Merge(src 
proto.Message) { + xxx_messageInfo_OAuthClientAuthorizationList.Merge(m, src) +} +func (m *OAuthClientAuthorizationList) XXX_Size() int { + return m.Size() +} +func (m *OAuthClientAuthorizationList) XXX_DiscardUnknown() { + xxx_messageInfo_OAuthClientAuthorizationList.DiscardUnknown(m) +} + +var xxx_messageInfo_OAuthClientAuthorizationList proto.InternalMessageInfo + +func (m *OAuthClientList) Reset() { *m = OAuthClientList{} } +func (*OAuthClientList) ProtoMessage() {} +func (*OAuthClientList) Descriptor() ([]byte, []int) { + return fileDescriptor_bd688dca7ea39c8a, []int{8} +} +func (m *OAuthClientList) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *OAuthClientList) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *OAuthClientList) XXX_Merge(src proto.Message) { + xxx_messageInfo_OAuthClientList.Merge(m, src) +} +func (m *OAuthClientList) XXX_Size() int { + return m.Size() +} +func (m *OAuthClientList) XXX_DiscardUnknown() { + xxx_messageInfo_OAuthClientList.DiscardUnknown(m) +} + +var xxx_messageInfo_OAuthClientList proto.InternalMessageInfo + +func (m *OAuthRedirectReference) Reset() { *m = OAuthRedirectReference{} } +func (*OAuthRedirectReference) ProtoMessage() {} +func (*OAuthRedirectReference) Descriptor() ([]byte, []int) { + return fileDescriptor_bd688dca7ea39c8a, []int{9} +} +func (m *OAuthRedirectReference) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *OAuthRedirectReference) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *OAuthRedirectReference) XXX_Merge(src proto.Message) { + xxx_messageInfo_OAuthRedirectReference.Merge(m, src) +} +func (m *OAuthRedirectReference) XXX_Size() int { + return m.Size() +} +func (m *OAuthRedirectReference) XXX_DiscardUnknown() { + xxx_messageInfo_OAuthRedirectReference.DiscardUnknown(m) +} + +var xxx_messageInfo_OAuthRedirectReference proto.InternalMessageInfo + +func (m *RedirectReference) Reset() { *m = RedirectReference{} } +func (*RedirectReference) ProtoMessage() {} +func (*RedirectReference) Descriptor() ([]byte, []int) { + return fileDescriptor_bd688dca7ea39c8a, []int{10} +} +func (m *RedirectReference) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *RedirectReference) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *RedirectReference) XXX_Merge(src proto.Message) { + xxx_messageInfo_RedirectReference.Merge(m, src) +} +func (m *RedirectReference) XXX_Size() int { + return m.Size() +} +func (m *RedirectReference) XXX_DiscardUnknown() { + xxx_messageInfo_RedirectReference.DiscardUnknown(m) +} + +var xxx_messageInfo_RedirectReference proto.InternalMessageInfo + +func (m *ScopeRestriction) Reset() { *m = ScopeRestriction{} } +func (*ScopeRestriction) ProtoMessage() {} +func (*ScopeRestriction) Descriptor() ([]byte, []int) { + return fileDescriptor_bd688dca7ea39c8a, []int{11} +} +func (m *ScopeRestriction) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ScopeRestriction) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func 
(m *ScopeRestriction) XXX_Merge(src proto.Message) { + xxx_messageInfo_ScopeRestriction.Merge(m, src) +} +func (m *ScopeRestriction) XXX_Size() int { + return m.Size() +} +func (m *ScopeRestriction) XXX_DiscardUnknown() { + xxx_messageInfo_ScopeRestriction.DiscardUnknown(m) +} + +var xxx_messageInfo_ScopeRestriction proto.InternalMessageInfo + +func init() { + proto.RegisterType((*ClusterRoleScopeRestriction)(nil), "github.com.openshift.api.oauth.v1.ClusterRoleScopeRestriction") + proto.RegisterType((*OAuthAccessToken)(nil), "github.com.openshift.api.oauth.v1.OAuthAccessToken") + proto.RegisterType((*OAuthAccessTokenList)(nil), "github.com.openshift.api.oauth.v1.OAuthAccessTokenList") + proto.RegisterType((*OAuthAuthorizeToken)(nil), "github.com.openshift.api.oauth.v1.OAuthAuthorizeToken") + proto.RegisterType((*OAuthAuthorizeTokenList)(nil), "github.com.openshift.api.oauth.v1.OAuthAuthorizeTokenList") + proto.RegisterType((*OAuthClient)(nil), "github.com.openshift.api.oauth.v1.OAuthClient") + proto.RegisterType((*OAuthClientAuthorization)(nil), "github.com.openshift.api.oauth.v1.OAuthClientAuthorization") + proto.RegisterType((*OAuthClientAuthorizationList)(nil), "github.com.openshift.api.oauth.v1.OAuthClientAuthorizationList") + proto.RegisterType((*OAuthClientList)(nil), "github.com.openshift.api.oauth.v1.OAuthClientList") + proto.RegisterType((*OAuthRedirectReference)(nil), "github.com.openshift.api.oauth.v1.OAuthRedirectReference") + proto.RegisterType((*RedirectReference)(nil), "github.com.openshift.api.oauth.v1.RedirectReference") + proto.RegisterType((*ScopeRestriction)(nil), "github.com.openshift.api.oauth.v1.ScopeRestriction") +} + +func init() { + proto.RegisterFile("github.com/openshift/api/oauth/v1/generated.proto", fileDescriptor_bd688dca7ea39c8a) +} + +var fileDescriptor_bd688dca7ea39c8a = []byte{ + // 1228 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xec, 0x57, 0xcd, 0x6f, 0x1b, 0xc5, + 0x1b, 0xce, 0x26, 0xb6, 0x63, 0x8f, 0x9b, 0x0f, 0x4f, 0x9a, 0x76, 0x7f, 0x6d, 0x7f, 0xb6, 0x71, + 0x24, 0x6a, 0x04, 0xac, 0x49, 0x28, 0x55, 0xa5, 0x4a, 0x95, 0x6c, 0x53, 0x15, 0x0b, 0xd2, 0x4a, + 0xe3, 0x06, 0x2a, 0xe8, 0xa1, 0x93, 0xdd, 0x37, 0xf6, 0x90, 0xf5, 0xee, 0xb2, 0x33, 0x0e, 0x09, + 0xea, 0x81, 0x0b, 0x77, 0xfe, 0x11, 0x2e, 0xdc, 0x39, 0x20, 0x71, 0xc8, 0x09, 0xf5, 0xc0, 0xa1, + 0x27, 0x8b, 0x18, 0xf1, 0x4f, 0x70, 0x42, 0x3b, 0xbb, 0xde, 0x0f, 0x7f, 0x10, 0xe7, 0x12, 0x71, + 0xe0, 0xb6, 0xfb, 0x3e, 0xcf, 0xf3, 0xce, 0xc7, 0xbe, 0xcf, 0x3b, 0xb3, 0x68, 0xbb, 0xc3, 0x44, + 0xb7, 0xbf, 0xaf, 0xe9, 0x76, 0xaf, 0x66, 0x3b, 0x60, 0xf1, 0x2e, 0x3b, 0x10, 0x35, 0xea, 0xb0, + 0x9a, 0x4d, 0xfb, 0xa2, 0x5b, 0x3b, 0xda, 0xae, 0x75, 0xc0, 0x02, 0x97, 0x0a, 0x30, 0x34, 0xc7, + 0xb5, 0x85, 0x8d, 0xdf, 0x88, 0x24, 0x5a, 0x28, 0xd1, 0xa8, 0xc3, 0x34, 0x29, 0xd1, 0x8e, 0xb6, + 0x6f, 0xbc, 0x1b, 0xcb, 0xda, 0xb1, 0x3b, 0x76, 0x4d, 0x2a, 0xf7, 0xfb, 0x07, 0xf2, 0x4d, 0xbe, + 0xc8, 0x27, 0x3f, 0xe3, 0x8d, 0x3b, 0x87, 0xf7, 0xb8, 0xc6, 0x6c, 0x6f, 0xd8, 0x1e, 0xd5, 0xbb, + 0xcc, 0x02, 0xf7, 0xa4, 0xe6, 0x1c, 0x76, 0xbc, 0x00, 0xaf, 0xf5, 0x40, 0xd0, 0x29, 0xf3, 0xb8, + 0x71, 0x77, 0x96, 0xca, 0xed, 0x5b, 0x82, 0xf5, 0xa0, 0xc6, 0xf5, 0x2e, 0xf4, 0xe8, 0xb8, 0xae, + 0xf2, 0x93, 0x82, 0x6e, 0x36, 0xcd, 0x3e, 0x17, 0xe0, 0x12, 0xdb, 0x84, 0xb6, 0x6e, 0x3b, 0x40, + 0x80, 0x0b, 0x97, 0xe9, 0x82, 0xd9, 0x16, 0x7e, 0x1b, 0xe5, 0x5c, 0xdb, 0x84, 0xc7, 0xb4, 0x07, + 0x5c, 0x55, 0xca, 0x4b, 0xd5, 0x5c, 0x63, 0x65, 0x38, 0x28, 0xe5, 0xc8, 0x28, 0x48, 0x22, 
0x1c, + 0x6b, 0x08, 0x59, 0xde, 0x83, 0x43, 0x75, 0xe0, 0xea, 0xa2, 0x64, 0xaf, 0x0e, 0x07, 0x25, 0xf4, + 0x38, 0x8c, 0x92, 0x18, 0x03, 0xd7, 0xd1, 0x1a, 0x35, 0x4d, 0xfb, 0xeb, 0x87, 0x5c, 0xa7, 0x26, + 0xf5, 0xc6, 0x53, 0x97, 0xca, 0x4a, 0x35, 0xdb, 0xb8, 0x7e, 0x3a, 0x28, 0x2d, 0x0c, 0x07, 0xa5, + 0xb5, 0x7a, 0x12, 0x26, 0xe3, 0xfc, 0xca, 0x9f, 0x29, 0xb4, 0xfe, 0xa4, 0xde, 0x17, 0xdd, 0xba, + 0xae, 0x03, 0xe7, 0x4f, 0xed, 0x43, 0xb0, 0xf0, 0x0b, 0x94, 0xf5, 0xf6, 0xc9, 0xa0, 0x82, 0xaa, + 0x4a, 0x59, 0xa9, 0xe6, 0x77, 0xde, 0xd3, 0xfc, 0xfd, 0xd1, 0xe2, 0xfb, 0xa3, 0x39, 0x87, 0x1d, + 0x2f, 0xc0, 0x35, 0x8f, 0xad, 0x1d, 0x6d, 0x6b, 0x4f, 0xf6, 0xbf, 0x04, 0x5d, 0xec, 0x82, 0xa0, + 0x0d, 0x1c, 0x4c, 0x01, 0x45, 0x31, 0x12, 0x66, 0xc5, 0x3b, 0x08, 0xe9, 0x26, 0x03, 0x4b, 0x78, + 0x2b, 0x53, 0x17, 0xcb, 0x4a, 0x35, 0x17, 0x29, 0x9a, 0x21, 0x42, 0x62, 0x2c, 0x5c, 0x43, 0x39, + 0x38, 0x76, 0x98, 0x0b, 0xbc, 0xe5, 0xaf, 0x73, 0xa9, 0x51, 0x08, 0x24, 0xb9, 0x87, 0x23, 0x80, + 0x44, 0x1c, 0x5c, 0x41, 0x19, 0xee, 0x7d, 0x0f, 0xae, 0xa6, 0xe4, 0x56, 0xa2, 0xe1, 0xa0, 0x94, + 0x91, 0x5f, 0x88, 0x93, 0x00, 0xc1, 0x1f, 0xa0, 0xbc, 0x0b, 0x06, 0x73, 0x41, 0x17, 0x7b, 0xa4, + 0xa5, 0xa6, 0xe5, 0x4c, 0x36, 0x82, 0xb4, 0x79, 0x12, 0x41, 0x24, 0xce, 0xc3, 0xef, 0xa0, 0x6c, + 0x9f, 0x83, 0x2b, 0x67, 0x9f, 0x91, 0x9a, 0xf5, 0x40, 0x93, 0xdd, 0x0b, 0xe2, 0x24, 0x64, 0xe0, + 0xb7, 0xd0, 0xb2, 0xf7, 0xbc, 0xd7, 0xfa, 0x50, 0x5d, 0x96, 0xe4, 0xb5, 0x80, 0xbc, 0xbc, 0xe7, + 0x87, 0xc9, 0x08, 0xc7, 0x0f, 0xd0, 0xaa, 0x57, 0xf7, 0xb6, 0xcb, 0xbe, 0x01, 0xf9, 0x31, 0xd4, + 0xac, 0x54, 0x5c, 0x0b, 0x14, 0xab, 0xf5, 0x04, 0x4a, 0xc6, 0xd8, 0xf8, 0x1e, 0xba, 0xe2, 0xc2, + 0x81, 0x0b, 0xbc, 0xeb, 0xab, 0x73, 0x52, 0x7d, 0x35, 0x50, 0x5f, 0x21, 0x31, 0x8c, 0x24, 0x98, + 0xf8, 0x39, 0x52, 0x99, 0x45, 0x75, 0xc1, 0x8e, 0x98, 0x38, 0x79, 0xca, 0x7a, 0x60, 0xf7, 0x45, + 0x1b, 0x74, 0xdb, 0x32, 0xb8, 0x8a, 0xca, 0x4a, 0x35, 0xdd, 0x28, 0x07, 0x59, 0xd4, 0xd6, 0x0c, + 0x1e, 0x99, 0x99, 0xa1, 0xf2, 0xab, 0x82, 0xae, 0x8e, 0xd7, 0xd9, 0x27, 0x8c, 0x0b, 0xfc, 0x7c, + 0xa2, 0xd6, 0xb4, 0xf9, 0x6a, 0xcd, 0x53, 0xcb, 0x4a, 0x0b, 0x77, 0x7e, 0x14, 0x89, 0xd5, 0xd9, + 0x33, 0x94, 0x66, 0x02, 0x7a, 0xbe, 0x99, 0xf2, 0x3b, 0xef, 0x6b, 0xe7, 0xb6, 0x1b, 0x6d, 0x7c, + 0x96, 0x8d, 0x95, 0x20, 0x7f, 0xba, 0xe5, 0x65, 0x22, 0x7e, 0xc2, 0xca, 0xcf, 0x29, 0xb4, 0xe1, + 0x53, 0x93, 0x1f, 0xe0, 0x3f, 0xef, 0x9c, 0xe7, 0x9d, 0x2d, 0x94, 0xe6, 0x82, 0x8a, 0x91, 0x71, + 0xc2, 0xed, 0x6d, 0x7b, 0x41, 0xe2, 0x63, 0x09, 0x83, 0x2d, 0x5f, 0xc4, 0x60, 0xd9, 0x73, 0x0c, + 0x76, 0x1f, 0xad, 0xe8, 0xb6, 0x01, 0xcd, 0x2e, 0x35, 0x4d, 0xb0, 0x3a, 0x10, 0x38, 0x64, 0x33, + 0x10, 0xac, 0x34, 0xe3, 0x20, 0x49, 0x72, 0xf1, 0x2e, 0xda, 0x48, 0x04, 0x76, 0x41, 0x74, 0x6d, + 0x43, 0xda, 0x23, 0xd7, 0xb8, 0x19, 0xa4, 0xd8, 0x68, 0x4e, 0x52, 0xc8, 0x34, 0x5d, 0xe5, 0x37, + 0x05, 0x5d, 0x9f, 0x52, 0x43, 0x97, 0xe0, 0x8b, 0x2f, 0x92, 0xbe, 0xb8, 0x3b, 0xb7, 0x2f, 0x12, + 0x13, 0x9d, 0x61, 0x8d, 0xef, 0x32, 0x28, 0x2f, 0xd9, 0x7e, 0x31, 0x5e, 0x82, 0x25, 0xde, 0x44, + 0x19, 0x0e, 0xba, 0x0b, 0x22, 0xb0, 0xc3, 0x6a, 0xc0, 0xce, 0xb4, 0x65, 0x94, 0x04, 0x28, 0x6e, + 0xa2, 0x02, 0x35, 0x0c, 0xe6, 0x9d, 0x7c, 0xd4, 0xf4, 0x31, 0xae, 0x2e, 0xc9, 0x02, 0xdf, 0x1c, + 0x0e, 0x4a, 0x85, 0xfa, 0x38, 0x48, 0x26, 0xf9, 0xb8, 0x8d, 0x36, 0x5d, 0xe0, 0x8e, 0x6d, 0x19, + 0x9f, 0x31, 0xd1, 0x0d, 0xbf, 0xa9, 0xe7, 0x14, 0xef, 0xec, 0xfd, 0x7f, 0x30, 0xf6, 0x26, 0x99, + 0x46, 0x22, 0xd3, 0xb5, 0xf8, 0x8e, 0xd7, 0xb7, 0x43, 0x8f, 0x70, 0x35, 0x2d, 0x27, 0xb5, 0xee, + 0xf7, 0xec, 0x28, 
0x4e, 0x12, 0x2c, 0xdc, 0x42, 0xf9, 0x8e, 0x4b, 0x2d, 0x11, 0xd4, 0xa1, 0x6f, + 0xa8, 0xdb, 0x23, 0x07, 0x3e, 0x8a, 0xa0, 0xbf, 0x06, 0xa5, 0x75, 0xf9, 0xfa, 0x11, 0xb5, 0x0c, + 0x13, 0xdc, 0xa7, 0x27, 0x0e, 0x90, 0xb8, 0x16, 0xbf, 0x44, 0x05, 0x3e, 0x76, 0x79, 0xe1, 0xea, + 0xf2, 0xdc, 0x5d, 0x73, 0xfc, 0xe2, 0xd3, 0xf8, 0x5f, 0x30, 0x8b, 0xc2, 0x38, 0xc2, 0xc9, 0xe4, + 0x40, 0xf8, 0x19, 0x52, 0x69, 0xd4, 0x72, 0x77, 0xe9, 0x71, 0xbd, 0x03, 0xa3, 0xc3, 0x27, 0x2b, + 0x0f, 0x9f, 0x5b, 0xde, 0xc1, 0x53, 0x9f, 0xc1, 0x21, 0x33, 0xd5, 0xf8, 0x04, 0x6d, 0xc5, 0xb0, + 0x59, 0x27, 0x97, 0xec, 0x02, 0xe9, 0xc6, 0xed, 0xe1, 0xa0, 0xb4, 0x55, 0x3f, 0x9f, 0x4e, 0xe6, + 0xc9, 0x59, 0xf9, 0x61, 0x11, 0xa9, 0x31, 0x1f, 0x8c, 0xbc, 0x23, 0x2f, 0x5e, 0xff, 0xd2, 0x73, + 0x22, 0xde, 0x76, 0x97, 0x2e, 0xd2, 0x76, 0x53, 0xe7, 0xb4, 0xdd, 0xe8, 0x3c, 0x49, 0xcf, 0x3a, + 0x4f, 0x2a, 0x03, 0x05, 0xdd, 0x9a, 0xb5, 0x5f, 0x97, 0xd0, 0x13, 0x5f, 0x24, 0x7b, 0xe2, 0xfd, + 0x79, 0x7b, 0xe2, 0x94, 0xd9, 0xce, 0x68, 0x8c, 0xbf, 0x28, 0x68, 0x2d, 0x26, 0xb9, 0x84, 0x35, + 0xb5, 0x93, 0x6b, 0xd2, 0x2e, 0xb6, 0xa6, 0x19, 0xcb, 0x38, 0x53, 0xd0, 0x35, 0xc9, 0x1a, 0x75, + 0x26, 0x02, 0x07, 0xe0, 0x82, 0xa5, 0xc3, 0x25, 0x54, 0x35, 0xa0, 0x9c, 0x3b, 0x1a, 0x4e, 0x16, + 0x75, 0x7e, 0xe7, 0xce, 0x1c, 0xab, 0x9a, 0x98, 0x6a, 0x74, 0xff, 0x09, 0x43, 0x24, 0xca, 0x5c, + 0x79, 0x89, 0x0a, 0x93, 0xab, 0xdb, 0x42, 0xe9, 0x8e, 0x6b, 0xf7, 0x1d, 0xb9, 0xb4, 0xd8, 0xcd, + 0xe5, 0x91, 0x17, 0x24, 0x3e, 0x86, 0xcb, 0x28, 0x75, 0xc8, 0x2c, 0x23, 0x30, 0xdc, 0x95, 0x80, + 0x93, 0xfa, 0x98, 0x59, 0x06, 0x91, 0x88, 0xc7, 0xb0, 0x22, 0x83, 0x85, 0x0c, 0x69, 0x2e, 0x89, + 0x54, 0x7e, 0x54, 0xd0, 0xfa, 0x94, 0x5f, 0xc9, 0xac, 0xc9, 0x04, 0xb8, 0xd4, 0x1c, 0xfd, 0x49, + 0xae, 0x79, 0x5d, 0xfe, 0xe1, 0x31, 0xd5, 0xc5, 0xa7, 0xd4, 0xec, 0x03, 0x27, 0x21, 0x01, 0x7f, + 0x85, 0xf2, 0x7a, 0xf4, 0x5b, 0x1a, 0x6c, 0xd4, 0x83, 0x39, 0x36, 0xea, 0x1f, 0x7e, 0x66, 0xfd, + 0xf1, 0x62, 0x04, 0x12, 0x1f, 0xa3, 0x51, 0x3d, 0x3d, 0x2b, 0x2e, 0xbc, 0x3a, 0x2b, 0x2e, 0xbc, + 0x3e, 0x2b, 0x2e, 0x7c, 0x3b, 0x2c, 0x2a, 0xa7, 0xc3, 0xa2, 0xf2, 0x6a, 0x58, 0x54, 0x5e, 0x0f, + 0x8b, 0xca, 0xef, 0xc3, 0xa2, 0xf2, 0xfd, 0x1f, 0xc5, 0x85, 0xcf, 0x17, 0x8f, 0xb6, 0xff, 0x0e, + 0x00, 0x00, 0xff, 0xff, 0xd3, 0xf9, 0x68, 0xbb, 0x28, 0x10, 0x00, 0x00, +} + +func (m *ClusterRoleScopeRestriction) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ClusterRoleScopeRestriction) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ClusterRoleScopeRestriction) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + i-- + if m.AllowEscalation { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x18 + if len(m.Namespaces) > 0 { + for iNdEx := len(m.Namespaces) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.Namespaces[iNdEx]) + copy(dAtA[i:], m.Namespaces[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Namespaces[iNdEx]))) + i-- + dAtA[i] = 0x12 + } + } + if len(m.RoleNames) > 0 { + for iNdEx := len(m.RoleNames) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.RoleNames[iNdEx]) + copy(dAtA[i:], m.RoleNames[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.RoleNames[iNdEx]))) + i-- + dAtA[i] = 0xa + } + } + return len(dAtA) - i, nil +} + +func (m *OAuthAccessToken) Marshal() (dAtA []byte, err error) { + size := 
m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *OAuthAccessToken) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *OAuthAccessToken) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + i = encodeVarintGenerated(dAtA, i, uint64(m.InactivityTimeoutSeconds)) + i-- + dAtA[i] = 0x50 + i -= len(m.RefreshToken) + copy(dAtA[i:], m.RefreshToken) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.RefreshToken))) + i-- + dAtA[i] = 0x4a + i -= len(m.AuthorizeToken) + copy(dAtA[i:], m.AuthorizeToken) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.AuthorizeToken))) + i-- + dAtA[i] = 0x42 + i -= len(m.UserUID) + copy(dAtA[i:], m.UserUID) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.UserUID))) + i-- + dAtA[i] = 0x3a + i -= len(m.UserName) + copy(dAtA[i:], m.UserName) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.UserName))) + i-- + dAtA[i] = 0x32 + i -= len(m.RedirectURI) + copy(dAtA[i:], m.RedirectURI) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.RedirectURI))) + i-- + dAtA[i] = 0x2a + if len(m.Scopes) > 0 { + for iNdEx := len(m.Scopes) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.Scopes[iNdEx]) + copy(dAtA[i:], m.Scopes[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Scopes[iNdEx]))) + i-- + dAtA[i] = 0x22 + } + } + i = encodeVarintGenerated(dAtA, i, uint64(m.ExpiresIn)) + i-- + dAtA[i] = 0x18 + i -= len(m.ClientName) + copy(dAtA[i:], m.ClientName) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.ClientName))) + i-- + dAtA[i] = 0x12 + { + size, err := m.ObjectMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *OAuthAccessTokenList) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *OAuthAccessTokenList) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *OAuthAccessTokenList) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.Items) > 0 { + for iNdEx := len(m.Items) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Items[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + } + { + size, err := m.ListMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *OAuthAuthorizeToken) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *OAuthAuthorizeToken) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *OAuthAuthorizeToken) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + i -= len(m.CodeChallengeMethod) + copy(dAtA[i:], m.CodeChallengeMethod) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.CodeChallengeMethod))) 
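+ // gogo/protobuf marshals back-to-front: MarshalToSizedBuffer fills dAtA from the end, so each field's tag byte is written after its value and therefore ends up in front of it in the buffer.
+ // The 0x52 below is the protobuf tag for codeChallengeMethod: field number 10, wire type 2 (length-delimited), i.e. (10<<3)|2.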
+ i-- + dAtA[i] = 0x52 + i -= len(m.CodeChallenge) + copy(dAtA[i:], m.CodeChallenge) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.CodeChallenge))) + i-- + dAtA[i] = 0x4a + i -= len(m.UserUID) + copy(dAtA[i:], m.UserUID) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.UserUID))) + i-- + dAtA[i] = 0x42 + i -= len(m.UserName) + copy(dAtA[i:], m.UserName) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.UserName))) + i-- + dAtA[i] = 0x3a + i -= len(m.State) + copy(dAtA[i:], m.State) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.State))) + i-- + dAtA[i] = 0x32 + i -= len(m.RedirectURI) + copy(dAtA[i:], m.RedirectURI) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.RedirectURI))) + i-- + dAtA[i] = 0x2a + if len(m.Scopes) > 0 { + for iNdEx := len(m.Scopes) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.Scopes[iNdEx]) + copy(dAtA[i:], m.Scopes[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Scopes[iNdEx]))) + i-- + dAtA[i] = 0x22 + } + } + i = encodeVarintGenerated(dAtA, i, uint64(m.ExpiresIn)) + i-- + dAtA[i] = 0x18 + i -= len(m.ClientName) + copy(dAtA[i:], m.ClientName) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.ClientName))) + i-- + dAtA[i] = 0x12 + { + size, err := m.ObjectMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *OAuthAuthorizeTokenList) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *OAuthAuthorizeTokenList) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *OAuthAuthorizeTokenList) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.Items) > 0 { + for iNdEx := len(m.Items) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Items[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + } + { + size, err := m.ListMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *OAuthClient) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *OAuthClient) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *OAuthClient) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.AccessTokenInactivityTimeoutSeconds != nil { + i = encodeVarintGenerated(dAtA, i, uint64(*m.AccessTokenInactivityTimeoutSeconds)) + i-- + dAtA[i] = 0x48 + } + if m.AccessTokenMaxAgeSeconds != nil { + i = encodeVarintGenerated(dAtA, i, uint64(*m.AccessTokenMaxAgeSeconds)) + i-- + dAtA[i] = 0x40 + } + if len(m.ScopeRestrictions) > 0 { + for iNdEx := len(m.ScopeRestrictions) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.ScopeRestrictions[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x3a + } + } + i -= len(m.GrantMethod) + copy(dAtA[i:], 
m.GrantMethod) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.GrantMethod))) + i-- + dAtA[i] = 0x32 + if len(m.RedirectURIs) > 0 { + for iNdEx := len(m.RedirectURIs) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.RedirectURIs[iNdEx]) + copy(dAtA[i:], m.RedirectURIs[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.RedirectURIs[iNdEx]))) + i-- + dAtA[i] = 0x2a + } + } + i-- + if m.RespondWithChallenges { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x20 + if len(m.AdditionalSecrets) > 0 { + for iNdEx := len(m.AdditionalSecrets) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.AdditionalSecrets[iNdEx]) + copy(dAtA[i:], m.AdditionalSecrets[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.AdditionalSecrets[iNdEx]))) + i-- + dAtA[i] = 0x1a + } + } + i -= len(m.Secret) + copy(dAtA[i:], m.Secret) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Secret))) + i-- + dAtA[i] = 0x12 + { + size, err := m.ObjectMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *OAuthClientAuthorization) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *OAuthClientAuthorization) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *OAuthClientAuthorization) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.Scopes) > 0 { + for iNdEx := len(m.Scopes) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.Scopes[iNdEx]) + copy(dAtA[i:], m.Scopes[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Scopes[iNdEx]))) + i-- + dAtA[i] = 0x2a + } + } + i -= len(m.UserUID) + copy(dAtA[i:], m.UserUID) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.UserUID))) + i-- + dAtA[i] = 0x22 + i -= len(m.UserName) + copy(dAtA[i:], m.UserName) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.UserName))) + i-- + dAtA[i] = 0x1a + i -= len(m.ClientName) + copy(dAtA[i:], m.ClientName) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.ClientName))) + i-- + dAtA[i] = 0x12 + { + size, err := m.ObjectMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *OAuthClientAuthorizationList) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *OAuthClientAuthorizationList) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *OAuthClientAuthorizationList) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.Items) > 0 { + for iNdEx := len(m.Items) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Items[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + } + { + size, err := m.ListMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m 
*OAuthClientList) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *OAuthClientList) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *OAuthClientList) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.Items) > 0 { + for iNdEx := len(m.Items) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Items[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + } + { + size, err := m.ListMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *OAuthRedirectReference) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *OAuthRedirectReference) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *OAuthRedirectReference) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + { + size, err := m.Reference.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + { + size, err := m.ObjectMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *RedirectReference) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *RedirectReference) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *RedirectReference) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + i -= len(m.Name) + copy(dAtA[i:], m.Name) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Name))) + i-- + dAtA[i] = 0x1a + i -= len(m.Kind) + copy(dAtA[i:], m.Kind) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Kind))) + i-- + dAtA[i] = 0x12 + i -= len(m.Group) + copy(dAtA[i:], m.Group) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Group))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *ScopeRestriction) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ScopeRestriction) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ScopeRestriction) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.ClusterRole != nil { + { + size, err := m.ClusterRole.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + if len(m.ExactValues) > 0 { + for iNdEx := 
len(m.ExactValues) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.ExactValues[iNdEx]) + copy(dAtA[i:], m.ExactValues[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.ExactValues[iNdEx]))) + i-- + dAtA[i] = 0xa + } + } + return len(dAtA) - i, nil +} + +func encodeVarintGenerated(dAtA []byte, offset int, v uint64) int { + offset -= sovGenerated(v) + base := offset + for v >= 1<<7 { + dAtA[offset] = uint8(v&0x7f | 0x80) + v >>= 7 + offset++ + } + dAtA[offset] = uint8(v) + return base +} +func (m *ClusterRoleScopeRestriction) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.RoleNames) > 0 { + for _, s := range m.RoleNames { + l = len(s) + n += 1 + l + sovGenerated(uint64(l)) + } + } + if len(m.Namespaces) > 0 { + for _, s := range m.Namespaces { + l = len(s) + n += 1 + l + sovGenerated(uint64(l)) + } + } + n += 2 + return n +} + +func (m *OAuthAccessToken) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.ObjectMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.ClientName) + n += 1 + l + sovGenerated(uint64(l)) + n += 1 + sovGenerated(uint64(m.ExpiresIn)) + if len(m.Scopes) > 0 { + for _, s := range m.Scopes { + l = len(s) + n += 1 + l + sovGenerated(uint64(l)) + } + } + l = len(m.RedirectURI) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.UserName) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.UserUID) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.AuthorizeToken) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.RefreshToken) + n += 1 + l + sovGenerated(uint64(l)) + n += 1 + sovGenerated(uint64(m.InactivityTimeoutSeconds)) + return n +} + +func (m *OAuthAccessTokenList) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.ListMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + if len(m.Items) > 0 { + for _, e := range m.Items { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m *OAuthAuthorizeToken) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.ObjectMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.ClientName) + n += 1 + l + sovGenerated(uint64(l)) + n += 1 + sovGenerated(uint64(m.ExpiresIn)) + if len(m.Scopes) > 0 { + for _, s := range m.Scopes { + l = len(s) + n += 1 + l + sovGenerated(uint64(l)) + } + } + l = len(m.RedirectURI) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.State) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.UserName) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.UserUID) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.CodeChallenge) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.CodeChallengeMethod) + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *OAuthAuthorizeTokenList) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.ListMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + if len(m.Items) > 0 { + for _, e := range m.Items { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m *OAuthClient) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.ObjectMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Secret) + n += 1 + l + sovGenerated(uint64(l)) + if len(m.AdditionalSecrets) > 0 { + for _, s := range m.AdditionalSecrets { + l = len(s) + n += 1 + l + sovGenerated(uint64(l)) + } + } + n += 2 + if len(m.RedirectURIs) > 0 { + for _, s := range m.RedirectURIs { + l = len(s) + n += 1 + l + sovGenerated(uint64(l)) + } + } + l = len(m.GrantMethod) + n += 1 + l + 
sovGenerated(uint64(l)) + if len(m.ScopeRestrictions) > 0 { + for _, e := range m.ScopeRestrictions { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + if m.AccessTokenMaxAgeSeconds != nil { + n += 1 + sovGenerated(uint64(*m.AccessTokenMaxAgeSeconds)) + } + if m.AccessTokenInactivityTimeoutSeconds != nil { + n += 1 + sovGenerated(uint64(*m.AccessTokenInactivityTimeoutSeconds)) + } + return n +} + +func (m *OAuthClientAuthorization) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.ObjectMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.ClientName) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.UserName) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.UserUID) + n += 1 + l + sovGenerated(uint64(l)) + if len(m.Scopes) > 0 { + for _, s := range m.Scopes { + l = len(s) + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m *OAuthClientAuthorizationList) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.ListMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + if len(m.Items) > 0 { + for _, e := range m.Items { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m *OAuthClientList) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.ListMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + if len(m.Items) > 0 { + for _, e := range m.Items { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m *OAuthRedirectReference) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.ObjectMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = m.Reference.Size() + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *RedirectReference) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Group) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Kind) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Name) + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *ScopeRestriction) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.ExactValues) > 0 { + for _, s := range m.ExactValues { + l = len(s) + n += 1 + l + sovGenerated(uint64(l)) + } + } + if m.ClusterRole != nil { + l = m.ClusterRole.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + return n +} + +func sovGenerated(x uint64) (n int) { + return (math_bits.Len64(x|1) + 6) / 7 +} +func sozGenerated(x uint64) (n int) { + return sovGenerated(uint64((x << 1) ^ uint64((int64(x) >> 63)))) +} +func (this *ClusterRoleScopeRestriction) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&ClusterRoleScopeRestriction{`, + `RoleNames:` + fmt.Sprintf("%v", this.RoleNames) + `,`, + `Namespaces:` + fmt.Sprintf("%v", this.Namespaces) + `,`, + `AllowEscalation:` + fmt.Sprintf("%v", this.AllowEscalation) + `,`, + `}`, + }, "") + return s +} +func (this *OAuthAccessToken) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&OAuthAccessToken{`, + `ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v1.ObjectMeta", 1), `&`, ``, 1) + `,`, + `ClientName:` + fmt.Sprintf("%v", this.ClientName) + `,`, + `ExpiresIn:` + fmt.Sprintf("%v", this.ExpiresIn) + `,`, + `Scopes:` + fmt.Sprintf("%v", this.Scopes) + `,`, + `RedirectURI:` + fmt.Sprintf("%v", this.RedirectURI) + `,`, + `UserName:` + fmt.Sprintf("%v", this.UserName) + `,`, + `UserUID:` + fmt.Sprintf("%v", this.UserUID) + `,`, + 
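+ // Note: the token-bearing fields below are rendered verbatim in this generated debug String output.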
`AuthorizeToken:` + fmt.Sprintf("%v", this.AuthorizeToken) + `,`, + `RefreshToken:` + fmt.Sprintf("%v", this.RefreshToken) + `,`, + `InactivityTimeoutSeconds:` + fmt.Sprintf("%v", this.InactivityTimeoutSeconds) + `,`, + `}`, + }, "") + return s +} +func (this *OAuthAccessTokenList) String() string { + if this == nil { + return "nil" + } + repeatedStringForItems := "[]OAuthAccessToken{" + for _, f := range this.Items { + repeatedStringForItems += strings.Replace(strings.Replace(f.String(), "OAuthAccessToken", "OAuthAccessToken", 1), `&`, ``, 1) + "," + } + repeatedStringForItems += "}" + s := strings.Join([]string{`&OAuthAccessTokenList{`, + `ListMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ListMeta), "ListMeta", "v1.ListMeta", 1), `&`, ``, 1) + `,`, + `Items:` + repeatedStringForItems + `,`, + `}`, + }, "") + return s +} +func (this *OAuthAuthorizeToken) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&OAuthAuthorizeToken{`, + `ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v1.ObjectMeta", 1), `&`, ``, 1) + `,`, + `ClientName:` + fmt.Sprintf("%v", this.ClientName) + `,`, + `ExpiresIn:` + fmt.Sprintf("%v", this.ExpiresIn) + `,`, + `Scopes:` + fmt.Sprintf("%v", this.Scopes) + `,`, + `RedirectURI:` + fmt.Sprintf("%v", this.RedirectURI) + `,`, + `State:` + fmt.Sprintf("%v", this.State) + `,`, + `UserName:` + fmt.Sprintf("%v", this.UserName) + `,`, + `UserUID:` + fmt.Sprintf("%v", this.UserUID) + `,`, + `CodeChallenge:` + fmt.Sprintf("%v", this.CodeChallenge) + `,`, + `CodeChallengeMethod:` + fmt.Sprintf("%v", this.CodeChallengeMethod) + `,`, + `}`, + }, "") + return s +} +func (this *OAuthAuthorizeTokenList) String() string { + if this == nil { + return "nil" + } + repeatedStringForItems := "[]OAuthAuthorizeToken{" + for _, f := range this.Items { + repeatedStringForItems += strings.Replace(strings.Replace(f.String(), "OAuthAuthorizeToken", "OAuthAuthorizeToken", 1), `&`, ``, 1) + "," + } + repeatedStringForItems += "}" + s := strings.Join([]string{`&OAuthAuthorizeTokenList{`, + `ListMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ListMeta), "ListMeta", "v1.ListMeta", 1), `&`, ``, 1) + `,`, + `Items:` + repeatedStringForItems + `,`, + `}`, + }, "") + return s +} +func (this *OAuthClient) String() string { + if this == nil { + return "nil" + } + repeatedStringForScopeRestrictions := "[]ScopeRestriction{" + for _, f := range this.ScopeRestrictions { + repeatedStringForScopeRestrictions += strings.Replace(strings.Replace(f.String(), "ScopeRestriction", "ScopeRestriction", 1), `&`, ``, 1) + "," + } + repeatedStringForScopeRestrictions += "}" + s := strings.Join([]string{`&OAuthClient{`, + `ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v1.ObjectMeta", 1), `&`, ``, 1) + `,`, + `Secret:` + fmt.Sprintf("%v", this.Secret) + `,`, + `AdditionalSecrets:` + fmt.Sprintf("%v", this.AdditionalSecrets) + `,`, + `RespondWithChallenges:` + fmt.Sprintf("%v", this.RespondWithChallenges) + `,`, + `RedirectURIs:` + fmt.Sprintf("%v", this.RedirectURIs) + `,`, + `GrantMethod:` + fmt.Sprintf("%v", this.GrantMethod) + `,`, + `ScopeRestrictions:` + repeatedStringForScopeRestrictions + `,`, + `AccessTokenMaxAgeSeconds:` + valueToStringGenerated(this.AccessTokenMaxAgeSeconds) + `,`, + `AccessTokenInactivityTimeoutSeconds:` + valueToStringGenerated(this.AccessTokenInactivityTimeoutSeconds) + `,`, + `}`, + }, "") + return s +} +func (this 
*OAuthClientAuthorization) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&OAuthClientAuthorization{`, + `ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v1.ObjectMeta", 1), `&`, ``, 1) + `,`, + `ClientName:` + fmt.Sprintf("%v", this.ClientName) + `,`, + `UserName:` + fmt.Sprintf("%v", this.UserName) + `,`, + `UserUID:` + fmt.Sprintf("%v", this.UserUID) + `,`, + `Scopes:` + fmt.Sprintf("%v", this.Scopes) + `,`, + `}`, + }, "") + return s +} +func (this *OAuthClientAuthorizationList) String() string { + if this == nil { + return "nil" + } + repeatedStringForItems := "[]OAuthClientAuthorization{" + for _, f := range this.Items { + repeatedStringForItems += strings.Replace(strings.Replace(f.String(), "OAuthClientAuthorization", "OAuthClientAuthorization", 1), `&`, ``, 1) + "," + } + repeatedStringForItems += "}" + s := strings.Join([]string{`&OAuthClientAuthorizationList{`, + `ListMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ListMeta), "ListMeta", "v1.ListMeta", 1), `&`, ``, 1) + `,`, + `Items:` + repeatedStringForItems + `,`, + `}`, + }, "") + return s +} +func (this *OAuthClientList) String() string { + if this == nil { + return "nil" + } + repeatedStringForItems := "[]OAuthClient{" + for _, f := range this.Items { + repeatedStringForItems += strings.Replace(strings.Replace(f.String(), "OAuthClient", "OAuthClient", 1), `&`, ``, 1) + "," + } + repeatedStringForItems += "}" + s := strings.Join([]string{`&OAuthClientList{`, + `ListMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ListMeta), "ListMeta", "v1.ListMeta", 1), `&`, ``, 1) + `,`, + `Items:` + repeatedStringForItems + `,`, + `}`, + }, "") + return s +} +func (this *OAuthRedirectReference) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&OAuthRedirectReference{`, + `ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v1.ObjectMeta", 1), `&`, ``, 1) + `,`, + `Reference:` + strings.Replace(strings.Replace(this.Reference.String(), "RedirectReference", "RedirectReference", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *RedirectReference) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&RedirectReference{`, + `Group:` + fmt.Sprintf("%v", this.Group) + `,`, + `Kind:` + fmt.Sprintf("%v", this.Kind) + `,`, + `Name:` + fmt.Sprintf("%v", this.Name) + `,`, + `}`, + }, "") + return s +} +func (this *ScopeRestriction) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&ScopeRestriction{`, + `ExactValues:` + fmt.Sprintf("%v", this.ExactValues) + `,`, + `ClusterRole:` + strings.Replace(this.ClusterRole.String(), "ClusterRoleScopeRestriction", "ClusterRoleScopeRestriction", 1) + `,`, + `}`, + }, "") + return s +} +func valueToStringGenerated(v interface{}) string { + rv := reflect.ValueOf(v) + if rv.IsNil() { + return "nil" + } + pv := reflect.Indirect(rv).Interface() + return fmt.Sprintf("*%v", pv) +} +func (m *ClusterRoleScopeRestriction) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if 
wireType == 4 { + return fmt.Errorf("proto: ClusterRoleScopeRestriction: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ClusterRoleScopeRestriction: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field RoleNames", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.RoleNames = append(m.RoleNames, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Namespaces", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Namespaces = append(m.Namespaces, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + case 3: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field AllowEscalation", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.AllowEscalation = bool(v != 0) + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *OAuthAccessToken) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: OAuthAccessToken: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: OAuthAccessToken: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + 
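+ // a byte with the high (continuation) bit clear ends the varint, so the loop above exits via break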
} + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ObjectMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ClientName", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ClientName = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field ExpiresIn", wireType) + } + m.ExpiresIn = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.ExpiresIn |= int64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Scopes", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Scopes = append(m.Scopes, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field RedirectURI", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.RedirectURI = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 6: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field UserName", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.UserName = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 7: + if wireType != 2 { + return fmt.Errorf("proto: 
wrong wireType = %d for field UserUID", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.UserUID = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 8: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field AuthorizeToken", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.AuthorizeToken = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 9: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field RefreshToken", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.RefreshToken = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 10: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field InactivityTimeoutSeconds", wireType) + } + m.InactivityTimeoutSeconds = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.InactivityTimeoutSeconds |= int32(b&0x7F) << shift + if b < 0x80 { + break + } + } + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *OAuthAccessTokenList) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: OAuthAccessTokenList: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: OAuthAccessTokenList: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch 
fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ListMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ListMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Items = append(m.Items, OAuthAccessToken{}) + if err := m.Items[len(m.Items)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *OAuthAuthorizeToken) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: OAuthAuthorizeToken: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: OAuthAuthorizeToken: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ObjectMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ClientName", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + 
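+ // varints carry 7 payload bits per byte, least-significant group first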
if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ClientName = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field ExpiresIn", wireType) + } + m.ExpiresIn = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.ExpiresIn |= int64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Scopes", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Scopes = append(m.Scopes, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field RedirectURI", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.RedirectURI = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 6: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field State", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.State = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 7: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field UserName", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.UserName = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 8: + if wireType != 2 { + return 
fmt.Errorf("proto: wrong wireType = %d for field UserUID", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.UserUID = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 9: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field CodeChallenge", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.CodeChallenge = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 10: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field CodeChallengeMethod", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.CodeChallengeMethod = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *OAuthAuthorizeTokenList) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: OAuthAuthorizeTokenList: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: OAuthAuthorizeTokenList: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ListMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if 
msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ListMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Items = append(m.Items, OAuthAuthorizeToken{}) + if err := m.Items[len(m.Items)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *OAuthClient) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: OAuthClient: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: OAuthClient: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ObjectMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Secret", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Secret = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong 
wireType = %d for field AdditionalSecrets", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.AdditionalSecrets = append(m.AdditionalSecrets, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + case 4: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field RespondWithChallenges", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.RespondWithChallenges = bool(v != 0) + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field RedirectURIs", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.RedirectURIs = append(m.RedirectURIs, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + case 6: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field GrantMethod", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.GrantMethod = GrantHandlerType(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 7: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ScopeRestrictions", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ScopeRestrictions = append(m.ScopeRestrictions, ScopeRestriction{}) + if err := m.ScopeRestrictions[len(m.ScopeRestrictions)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 8: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field AccessTokenMaxAgeSeconds", wireType) + } + var v int32 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 
ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int32(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.AccessTokenMaxAgeSeconds = &v + case 9: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field AccessTokenInactivityTimeoutSeconds", wireType) + } + var v int32 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int32(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.AccessTokenInactivityTimeoutSeconds = &v + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *OAuthClientAuthorization) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: OAuthClientAuthorization: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: OAuthClientAuthorization: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ObjectMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ClientName", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ClientName = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field UserName", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if 
intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.UserName = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field UserUID", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.UserUID = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Scopes", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Scopes = append(m.Scopes, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *OAuthClientAuthorizationList) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: OAuthClientAuthorizationList: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: OAuthClientAuthorizationList: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ListMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ListMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for 
field Items", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Items = append(m.Items, OAuthClientAuthorization{}) + if err := m.Items[len(m.Items)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *OAuthClientList) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: OAuthClientList: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: OAuthClientList: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ListMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ListMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Items = append(m.Items, OAuthClient{}) + if err := m.Items[len(m.Items)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m 
*OAuthRedirectReference) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: OAuthRedirectReference: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: OAuthRedirectReference: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ObjectMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Reference", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Reference.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *RedirectReference) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: RedirectReference: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: RedirectReference: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Group", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := 
int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Group = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Kind", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Kind = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Name = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ScopeRestriction) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ScopeRestriction: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ScopeRestriction: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ExactValues", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ExactValues = append(m.ExactValues, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field 
ClusterRole", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.ClusterRole == nil { + m.ClusterRole = &ClusterRoleScopeRestriction{} + } + if err := m.ClusterRole.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func skipGenerated(dAtA []byte) (n int, err error) { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowGenerated + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + wireType := int(wire & 0x7) + switch wireType { + case 0: + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowGenerated + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + iNdEx++ + if dAtA[iNdEx-1] < 0x80 { + break + } + } + return iNdEx, nil + case 1: + iNdEx += 8 + return iNdEx, nil + case 2: + var length int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowGenerated + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + length |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if length < 0 { + return 0, ErrInvalidLengthGenerated + } + iNdEx += length + if iNdEx < 0 { + return 0, ErrInvalidLengthGenerated + } + return iNdEx, nil + case 3: + for { + var innerWire uint64 + var start int = iNdEx + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowGenerated + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + innerWire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + innerWireType := int(innerWire & 0x7) + if innerWireType == 4 { + break + } + next, err := skipGenerated(dAtA[start:]) + if err != nil { + return 0, err + } + iNdEx = start + next + if iNdEx < 0 { + return 0, ErrInvalidLengthGenerated + } + } + return iNdEx, nil + case 4: + return iNdEx, nil + case 5: + iNdEx += 4 + return iNdEx, nil + default: + return 0, fmt.Errorf("proto: illegal wireType %d", wireType) + } + } + panic("unreachable") +} + +var ( + ErrInvalidLengthGenerated = fmt.Errorf("proto: negative length found during unmarshaling") + ErrIntOverflowGenerated = fmt.Errorf("proto: integer overflow") +) diff --git a/vendor/github.com/openshift/api/oauth/v1/generated.proto b/vendor/github.com/openshift/api/oauth/v1/generated.proto new file mode 100644 index 0000000000000000000000000000000000000000..e7a8c5c1e7d1c3e60769b01b4f5610d905b3c060 --- /dev/null +++ b/vendor/github.com/openshift/api/oauth/v1/generated.proto @@ -0,0 +1,218 @@ + +// This file was autogenerated by go-to-protobuf. Do not edit it manually! 
+
+syntax = 'proto2';
+
+package github.com.openshift.api.oauth.v1;
+
+import "k8s.io/apimachinery/pkg/apis/meta/v1/generated.proto";
+import "k8s.io/apimachinery/pkg/runtime/schema/generated.proto";
+
+// Package-wide variables from generator "generated".
+option go_package = "v1";
+
+// ClusterRoleScopeRestriction describes restrictions on cluster role scopes
+message ClusterRoleScopeRestriction {
+  // RoleNames is the list of cluster roles that can be referenced. * means anything
+  repeated string roleNames = 1;
+
+  // Namespaces is the list of namespaces that can be referenced. * means any of them (including *)
+  repeated string namespaces = 2;
+
+  // AllowEscalation indicates whether you can request roles and their escalating resources
+  optional bool allowEscalation = 3;
+}
+
+// OAuthAccessToken describes an OAuth access token
+message OAuthAccessToken {
+  optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1;
+
+  // ClientName references the client that created this token.
+  optional string clientName = 2;
+
+  // ExpiresIn is the seconds from CreationTime before this token expires.
+  optional int64 expiresIn = 3;
+
+  // Scopes is an array of the requested scopes.
+  repeated string scopes = 4;
+
+  // RedirectURI is the redirection associated with the token.
+  optional string redirectURI = 5;
+
+  // UserName is the user name associated with this token
+  optional string userName = 6;
+
+  // UserUID is the unique UID associated with this token
+  optional string userUID = 7;
+
+  // AuthorizeToken contains the token that authorized this token
+  optional string authorizeToken = 8;
+
+  // RefreshToken is the value by which this token can be renewed. Can be blank.
+  optional string refreshToken = 9;
+
+  // InactivityTimeoutSeconds is the value in seconds, from the
+  // CreationTimestamp, after which this token can no longer be used.
+  // The value is automatically incremented when the token is used.
+  optional int32 inactivityTimeoutSeconds = 10;
+}
+
+// OAuthAccessTokenList is a collection of OAuth access tokens
+message OAuthAccessTokenList {
+  optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1;
+
+  // Items is the list of OAuth access tokens
+  repeated OAuthAccessToken items = 2;
+}
+
+// OAuthAuthorizeToken describes an OAuth authorization token
+message OAuthAuthorizeToken {
+  optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1;
+
+  // ClientName references the client that created this token.
+  optional string clientName = 2;
+
+  // ExpiresIn is the seconds from CreationTime before this token expires.
+  optional int64 expiresIn = 3;
+
+  // Scopes is an array of the requested scopes.
+  repeated string scopes = 4;
+
+  // RedirectURI is the redirection associated with the token.
+  optional string redirectURI = 5;
+
+  // State data from request
+  optional string state = 6;
+
+  // UserName is the user name associated with this token
+  optional string userName = 7;
+
+  // UserUID is the unique UID associated with this token. UserUID and UserName must both match
+  // for this token to be valid.
+  optional string userUID = 8;
+
+  // CodeChallenge is the optional code_challenge associated with this authorization code, as described in rfc7636
+  optional string codeChallenge = 9;
+
+  // CodeChallengeMethod is the optional code_challenge_method associated with this authorization code, as described in rfc7636
+  optional string codeChallengeMethod = 10;
+}
+
+// OAuthAuthorizeTokenList is a collection of OAuth authorization tokens
+message OAuthAuthorizeTokenList {
+  optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1;
+
+  // Items is the list of OAuth authorization tokens
+  repeated OAuthAuthorizeToken items = 2;
+}
+
+// OAuthClient describes an OAuth client
+message OAuthClient {
+  optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1;
+
+  // Secret is the unique secret associated with a client
+  optional string secret = 2;
+
+  // AdditionalSecrets holds other secrets that may be used to identify the client. This is useful for rotation
+  // and for service account token validation
+  repeated string additionalSecrets = 3;
+
+  // RespondWithChallenges indicates whether the client wants authentication-needed responses made in the form of challenges instead of redirects
+  optional bool respondWithChallenges = 4;
+
+  // RedirectURIs is the valid redirection URIs associated with a client
+  // +patchStrategy=merge
+  repeated string redirectURIs = 5;
+
+  // GrantMethod is a required field which determines how to handle grants for this client.
+  // Valid grant handling methods are:
+  //  - auto: always approves grant requests, useful for trusted clients
+  //  - prompt: prompts the end user for approval of grant requests, useful for third-party clients
+  optional string grantMethod = 6;
+
+  // ScopeRestrictions describes which scopes this client can request. Each requested scope
+  // is checked against each restriction. If any restriction matches, then the scope is allowed.
+  // If no restriction matches, then the scope is denied.
+  repeated ScopeRestriction scopeRestrictions = 7;
+
+  // AccessTokenMaxAgeSeconds overrides the default access token max age for tokens granted to this client.
+  // 0 means no expiration.
+  optional int32 accessTokenMaxAgeSeconds = 8;
+
+  // AccessTokenInactivityTimeoutSeconds overrides the default token
+  // inactivity timeout for tokens granted to this client.
+  // The value represents the maximum amount of time that can occur between
+  // consecutive uses of the token. Tokens become invalid if they are not
+  // used within this temporal window. The user will need to acquire a new
+  // token to regain access once a token times out.
+  // This value needs to be set only if the default set in configuration is
+  // not appropriate for this client. Valid values are:
+  // - 0: Tokens for this client never time out
+  // - X: Tokens time out if there is no activity for X seconds
+  // The current minimum allowed value for X is 300 (5 minutes)
+  optional int32 accessTokenInactivityTimeoutSeconds = 9;
+}
+
+// OAuthClientAuthorization describes an authorization created by an OAuth client
+message OAuthClientAuthorization {
+  optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1;
+
+  // ClientName references the client that created this authorization
+  optional string clientName = 2;
+
+  // UserName is the user name that authorized this client
+  optional string userName = 3;
+
+  // UserUID is the unique UID associated with this authorization. UserUID and UserName
+  // must both match for this authorization to be valid.
+  optional string userUID = 4;
+
+  // Scopes is an array of the granted scopes.
+  repeated string scopes = 5;
+}
+
+// OAuthClientAuthorizationList is a collection of OAuth client authorizations
+message OAuthClientAuthorizationList {
+  optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1;
+
+  // Items is the list of OAuth client authorizations
+  repeated OAuthClientAuthorization items = 2;
+}
+
+// OAuthClientList is a collection of OAuth clients
+message OAuthClientList {
+  optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1;
+
+  // Items is the list of OAuth clients
+  repeated OAuthClient items = 2;
+}
+
+// OAuthRedirectReference is a reference to an OAuth redirect object.
+message OAuthRedirectReference {
+  optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1;
+
+  // The reference to a redirect object in the current namespace.
+  optional RedirectReference reference = 2;
+}
+
+// RedirectReference specifies the target in the current namespace that resolves into redirect URIs. Only the 'Route' kind is currently allowed.
+message RedirectReference {
+  // The group of the target that is being referred to.
+  optional string group = 1;
+
+  // The kind of the target that is being referred to. Currently, only 'Route' is allowed.
+  optional string kind = 2;
+
+  // The name of the target that is being referred to. e.g. the name of the Route.
+  optional string name = 3;
+}
+
+// ScopeRestriction describes one restriction on scopes. Exactly one option must be non-nil.
+message ScopeRestriction {
+  // ExactValues means the scope has to match a particular set of strings exactly
+  repeated string literals = 1;
+
+  // ClusterRole describes a set of restrictions for cluster role scoping.
+  optional ClusterRoleScopeRestriction clusterRole = 2;
+}
+
diff --git a/vendor/github.com/openshift/api/oauth/v1/legacy.go b/vendor/github.com/openshift/api/oauth/v1/legacy.go
new file mode 100644
index 0000000000000000000000000000000000000000..65b57d2431255e4275dda733cbef76d4c06431dc
--- /dev/null
+++ b/vendor/github.com/openshift/api/oauth/v1/legacy.go
@@ -0,0 +1,30 @@
+package v1
+
+import (
+	corev1 "k8s.io/api/core/v1"
+	extensionsv1beta1 "k8s.io/api/extensions/v1beta1"
+	"k8s.io/apimachinery/pkg/runtime"
+	"k8s.io/apimachinery/pkg/runtime/schema"
+)
+
+var (
+	legacyGroupVersion            = schema.GroupVersion{Group: "", Version: "v1"}
+	legacySchemeBuilder           = runtime.NewSchemeBuilder(addLegacyKnownTypes, corev1.AddToScheme, extensionsv1beta1.AddToScheme)
+	DeprecatedInstallWithoutGroup = legacySchemeBuilder.AddToScheme
+)
+
+func addLegacyKnownTypes(scheme *runtime.Scheme) error {
+	types := []runtime.Object{
+		&OAuthAccessToken{},
+		&OAuthAccessTokenList{},
+		&OAuthAuthorizeToken{},
+		&OAuthAuthorizeTokenList{},
+		&OAuthClient{},
+		&OAuthClientList{},
+		&OAuthClientAuthorization{},
+		&OAuthClientAuthorizationList{},
+		&OAuthRedirectReference{},
+	}
+	scheme.AddKnownTypes(legacyGroupVersion, types...)
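+	// These kinds are registered under the legacy, group-less "v1" GroupVersion;
+	// new callers should prefer Install from register.go, which registers the
+	// same kinds under oauth.openshift.io/v1.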
+	return nil
+}
diff --git a/vendor/github.com/openshift/api/oauth/v1/register.go b/vendor/github.com/openshift/api/oauth/v1/register.go
new file mode 100644
index 0000000000000000000000000000000000000000..37278c64147cb53eb668672a29d58dd7266dd49d
--- /dev/null
+++ b/vendor/github.com/openshift/api/oauth/v1/register.go
@@ -0,0 +1,45 @@
+package v1
+
+import (
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	"k8s.io/apimachinery/pkg/runtime"
+	"k8s.io/apimachinery/pkg/runtime/schema"
+)
+
+var (
+	GroupName     = "oauth.openshift.io"
+	GroupVersion  = schema.GroupVersion{Group: GroupName, Version: "v1"}
+	schemeBuilder = runtime.NewSchemeBuilder(addKnownTypes)
+	// Install is a function which adds this version to a scheme
+	Install = schemeBuilder.AddToScheme
+
+	// SchemeGroupVersion generated code relies on this name
+	// Deprecated
+	SchemeGroupVersion = GroupVersion
+	// AddToScheme exists solely to keep the old generators creating valid code
+	// DEPRECATED
+	AddToScheme = schemeBuilder.AddToScheme
+)
+
+// Resource generated code relies on this being here, but it logically belongs to the group
+// DEPRECATED
+func Resource(resource string) schema.GroupResource {
+	return schema.GroupResource{Group: GroupName, Resource: resource}
+}
+
+// Adds the list of known types to api.Scheme.
+func addKnownTypes(scheme *runtime.Scheme) error {
+	scheme.AddKnownTypes(GroupVersion,
+		&OAuthAccessToken{},
+		&OAuthAccessTokenList{},
+		&OAuthAuthorizeToken{},
+		&OAuthAuthorizeTokenList{},
+		&OAuthClient{},
+		&OAuthClientList{},
+		&OAuthClientAuthorization{},
+		&OAuthClientAuthorizationList{},
+		&OAuthRedirectReference{},
+	)
+	metav1.AddToGroupVersion(scheme, GroupVersion)
+	return nil
+}
diff --git a/vendor/github.com/openshift/api/oauth/v1/types.go b/vendor/github.com/openshift/api/oauth/v1/types.go
new file mode 100644
index 0000000000000000000000000000000000000000..64bc17304706b877872e6fd14bc2c3b42a77f6b7
--- /dev/null
+++ b/vendor/github.com/openshift/api/oauth/v1/types.go
@@ -0,0 +1,254 @@
+package v1
+
+import (
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+)
+
+// +genclient
+// +genclient:nonNamespaced
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+
+// OAuthAccessToken describes an OAuth access token
+type OAuthAccessToken struct {
+	metav1.TypeMeta   `json:",inline"`
+	metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"`
+
+	// ClientName references the client that created this token.
+	ClientName string `json:"clientName,omitempty" protobuf:"bytes,2,opt,name=clientName"`
+
+	// ExpiresIn is the seconds from CreationTime before this token expires.
+	ExpiresIn int64 `json:"expiresIn,omitempty" protobuf:"varint,3,opt,name=expiresIn"`
+
+	// Scopes is an array of the requested scopes.
+	Scopes []string `json:"scopes,omitempty" protobuf:"bytes,4,rep,name=scopes"`
+
+	// RedirectURI is the redirection associated with the token.
+	RedirectURI string `json:"redirectURI,omitempty" protobuf:"bytes,5,opt,name=redirectURI"`
+
+	// UserName is the user name associated with this token
+	UserName string `json:"userName,omitempty" protobuf:"bytes,6,opt,name=userName"`
+
+	// UserUID is the unique UID associated with this token
+	UserUID string `json:"userUID,omitempty" protobuf:"bytes,7,opt,name=userUID"`
+
+	// AuthorizeToken contains the token that authorized this token
+	AuthorizeToken string `json:"authorizeToken,omitempty" protobuf:"bytes,8,opt,name=authorizeToken"`
+
+	// RefreshToken is the value by which this token can be renewed. Can be blank.
+	RefreshToken string `json:"refreshToken,omitempty" protobuf:"bytes,9,opt,name=refreshToken"`
+
+	// InactivityTimeoutSeconds is the value in seconds, from the
+	// CreationTimestamp, after which this token can no longer be used.
+	// The value is automatically incremented when the token is used.
+	InactivityTimeoutSeconds int32 `json:"inactivityTimeoutSeconds,omitempty" protobuf:"varint,10,opt,name=inactivityTimeoutSeconds"`
+}
+
+// +genclient
+// +genclient:nonNamespaced
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+
+// OAuthAuthorizeToken describes an OAuth authorization token
+type OAuthAuthorizeToken struct {
+	metav1.TypeMeta   `json:",inline"`
+	metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"`
+
+	// ClientName references the client that created this token.
+	ClientName string `json:"clientName,omitempty" protobuf:"bytes,2,opt,name=clientName"`
+
+	// ExpiresIn is the seconds from CreationTime before this token expires.
+	ExpiresIn int64 `json:"expiresIn,omitempty" protobuf:"varint,3,opt,name=expiresIn"`
+
+	// Scopes is an array of the requested scopes.
+	Scopes []string `json:"scopes,omitempty" protobuf:"bytes,4,rep,name=scopes"`
+
+	// RedirectURI is the redirection associated with the token.
+	RedirectURI string `json:"redirectURI,omitempty" protobuf:"bytes,5,opt,name=redirectURI"`
+
+	// State data from request
+	State string `json:"state,omitempty" protobuf:"bytes,6,opt,name=state"`
+
+	// UserName is the user name associated with this token
+	UserName string `json:"userName,omitempty" protobuf:"bytes,7,opt,name=userName"`
+
+	// UserUID is the unique UID associated with this token. UserUID and UserName must both match
+	// for this token to be valid.
+	UserUID string `json:"userUID,omitempty" protobuf:"bytes,8,opt,name=userUID"`
+
+	// CodeChallenge is the optional code_challenge associated with this authorization code, as described in rfc7636
+	CodeChallenge string `json:"codeChallenge,omitempty" protobuf:"bytes,9,opt,name=codeChallenge"`
+
+	// CodeChallengeMethod is the optional code_challenge_method associated with this authorization code, as described in rfc7636
+	CodeChallengeMethod string `json:"codeChallengeMethod,omitempty" protobuf:"bytes,10,opt,name=codeChallengeMethod"`
+}
+
+// +genclient
+// +genclient:nonNamespaced
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+
+// OAuthClient describes an OAuth client
+type OAuthClient struct {
+	metav1.TypeMeta   `json:",inline"`
+	metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"`
+
+	// Secret is the unique secret associated with a client
+	Secret string `json:"secret,omitempty" protobuf:"bytes,2,opt,name=secret"`
+
+	// AdditionalSecrets holds other secrets that may be used to identify the client. This is useful for rotation
+	// and for service account token validation
+	AdditionalSecrets []string `json:"additionalSecrets,omitempty" protobuf:"bytes,3,rep,name=additionalSecrets"`
+
+	// RespondWithChallenges indicates whether the client wants authentication-needed responses made in the form of challenges instead of redirects
+	RespondWithChallenges bool `json:"respondWithChallenges,omitempty" protobuf:"varint,4,opt,name=respondWithChallenges"`
+
+	// RedirectURIs is the valid redirection URIs associated with a client
+	// +patchStrategy=merge
+	RedirectURIs []string `json:"redirectURIs,omitempty" patchStrategy:"merge" protobuf:"bytes,5,rep,name=redirectURIs"`
+
+	// GrantMethod is a required field which determines how to handle grants for this client.
+	// Valid grant handling methods are:
+	//  - auto: always approves grant requests, useful for trusted clients
+	//  - prompt: prompts the end user for approval of grant requests, useful for third-party clients
+	GrantMethod GrantHandlerType `json:"grantMethod,omitempty" protobuf:"bytes,6,opt,name=grantMethod,casttype=GrantHandlerType"`
+
+	// ScopeRestrictions describes which scopes this client can request. Each requested scope
+	// is checked against each restriction. If any restriction matches, then the scope is allowed.
+	// If no restriction matches, then the scope is denied.
+	ScopeRestrictions []ScopeRestriction `json:"scopeRestrictions,omitempty" protobuf:"bytes,7,rep,name=scopeRestrictions"`
+
+	// AccessTokenMaxAgeSeconds overrides the default access token max age for tokens granted to this client.
+	// 0 means no expiration.
+	AccessTokenMaxAgeSeconds *int32 `json:"accessTokenMaxAgeSeconds,omitempty" protobuf:"varint,8,opt,name=accessTokenMaxAgeSeconds"`
+
+	// AccessTokenInactivityTimeoutSeconds overrides the default token
+	// inactivity timeout for tokens granted to this client.
+	// The value represents the maximum amount of time that can occur between
+	// consecutive uses of the token. Tokens become invalid if they are not
+	// used within this temporal window. The user will need to acquire a new
+	// token to regain access once a token times out.
+	// This value needs to be set only if the default set in configuration is
+	// not appropriate for this client. Valid values are:
+	// - 0: Tokens for this client never time out
+	// - X: Tokens time out if there is no activity for X seconds
+	// The current minimum allowed value for X is 300 (5 minutes)
+	AccessTokenInactivityTimeoutSeconds *int32 `json:"accessTokenInactivityTimeoutSeconds,omitempty" protobuf:"varint,9,opt,name=accessTokenInactivityTimeoutSeconds"`
+}
+
+type GrantHandlerType string
+
+const (
+	// GrantHandlerAuto auto-approves client authorization grant requests
+	GrantHandlerAuto GrantHandlerType = "auto"
+	// GrantHandlerPrompt prompts the user to approve new client authorization grant requests
+	GrantHandlerPrompt GrantHandlerType = "prompt"
+	// GrantHandlerDeny auto-denies client authorization grant requests
+	GrantHandlerDeny GrantHandlerType = "deny"
+)
+
+// ScopeRestriction describes one restriction on scopes. Exactly one option must be non-nil.
+type ScopeRestriction struct {
+	// ExactValues means the scope has to match a particular set of strings exactly
+	ExactValues []string `json:"literals,omitempty" protobuf:"bytes,1,rep,name=literals"`
+
+	// ClusterRole describes a set of restrictions for cluster role scoping.
+	ClusterRole *ClusterRoleScopeRestriction `json:"clusterRole,omitempty" protobuf:"bytes,2,opt,name=clusterRole"`
+}
+
+// ClusterRoleScopeRestriction describes restrictions on cluster role scopes
+type ClusterRoleScopeRestriction struct {
+	// RoleNames is the list of cluster roles that can be referenced. * means anything
+	RoleNames []string `json:"roleNames" protobuf:"bytes,1,rep,name=roleNames"`
+	// Namespaces is the list of namespaces that can be referenced. * means any of them (including *)
+	Namespaces []string `json:"namespaces" protobuf:"bytes,2,rep,name=namespaces"`
+	// AllowEscalation indicates whether you can request roles and their escalating resources
+	AllowEscalation bool `json:"allowEscalation" protobuf:"varint,3,opt,name=allowEscalation"`
+}
+
+// +genclient
+// +genclient:nonNamespaced
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+
+// OAuthClientAuthorization describes an authorization created by an OAuth client
+type OAuthClientAuthorization struct {
+	metav1.TypeMeta   `json:",inline"`
+	metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"`
+
+	// ClientName references the client that created this authorization
+	ClientName string `json:"clientName,omitempty" protobuf:"bytes,2,opt,name=clientName"`
+
+	// UserName is the user name that authorized this client
+	UserName string `json:"userName,omitempty" protobuf:"bytes,3,opt,name=userName"`
+
+	// UserUID is the unique UID associated with this authorization. UserUID and UserName
+	// must both match for this authorization to be valid.
+	UserUID string `json:"userUID,omitempty" protobuf:"bytes,4,opt,name=userUID"`
+
+	// Scopes is an array of the granted scopes.
+	Scopes []string `json:"scopes,omitempty" protobuf:"bytes,5,rep,name=scopes"`
+}
+
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+
+// OAuthAccessTokenList is a collection of OAuth access tokens
+type OAuthAccessTokenList struct {
+	metav1.TypeMeta `json:",inline"`
+	metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"`
+
+	// Items is the list of OAuth access tokens
+	Items []OAuthAccessToken `json:"items" protobuf:"bytes,2,rep,name=items"`
+}
+
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+
+// OAuthAuthorizeTokenList is a collection of OAuth authorization tokens
+type OAuthAuthorizeTokenList struct {
+	metav1.TypeMeta `json:",inline"`
+	metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"`
+
+	// Items is the list of OAuth authorization tokens
+	Items []OAuthAuthorizeToken `json:"items" protobuf:"bytes,2,rep,name=items"`
+}
+
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+
+// OAuthClientList is a collection of OAuth clients
+type OAuthClientList struct {
+	metav1.TypeMeta `json:",inline"`
+	metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"`
+
+	// Items is the list of OAuth clients
+	Items []OAuthClient `json:"items" protobuf:"bytes,2,rep,name=items"`
+}
+
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+
+// OAuthClientAuthorizationList is a collection of OAuth client authorizations
+type OAuthClientAuthorizationList struct {
+	metav1.TypeMeta `json:",inline"`
+	metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"`
+
+	// Items is the list of OAuth client authorizations
+	Items []OAuthClientAuthorization `json:"items" protobuf:"bytes,2,rep,name=items"`
+}
+
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+
+// OAuthRedirectReference is a reference to an OAuth redirect object.
+type OAuthRedirectReference struct {
+	metav1.TypeMeta   `json:",inline"`
+	metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"`
+
+	// The reference to a redirect object in the current namespace.
+	Reference RedirectReference `json:"reference,omitempty" protobuf:"bytes,2,opt,name=reference"`
+}
+
+// RedirectReference specifies the target in the current namespace that resolves into redirect URIs. Only the 'Route' kind is currently allowed.
+type RedirectReference struct {
+	// The group of the target that is being referred to.
+	Group string `json:"group" protobuf:"bytes,1,opt,name=group"`
+
+	// The kind of the target that is being referred to. Currently, only 'Route' is allowed.
+	Kind string `json:"kind" protobuf:"bytes,2,opt,name=kind"`
+
+	// The name of the target that is being referred to. e.g. the name of the Route.
+	Name string `json:"name" protobuf:"bytes,3,opt,name=name"`
+}
diff --git a/vendor/github.com/openshift/api/oauth/v1/zz_generated.deepcopy.go b/vendor/github.com/openshift/api/oauth/v1/zz_generated.deepcopy.go
new file mode 100644
index 0000000000000000000000000000000000000000..4506548c69d204d84a71502714cdcf49de7e71c9
--- /dev/null
+++ b/vendor/github.com/openshift/api/oauth/v1/zz_generated.deepcopy.go
@@ -0,0 +1,382 @@
+// +build !ignore_autogenerated
+
+// Code generated by deepcopy-gen. DO NOT EDIT.
+
+package v1
+
+import (
+	runtime "k8s.io/apimachinery/pkg/runtime"
+)
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *ClusterRoleScopeRestriction) DeepCopyInto(out *ClusterRoleScopeRestriction) {
+	*out = *in
+	if in.RoleNames != nil {
+		in, out := &in.RoleNames, &out.RoleNames
+		*out = make([]string, len(*in))
+		copy(*out, *in)
+	}
+	if in.Namespaces != nil {
+		in, out := &in.Namespaces, &out.Namespaces
+		*out = make([]string, len(*in))
+		copy(*out, *in)
+	}
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClusterRoleScopeRestriction.
+func (in *ClusterRoleScopeRestriction) DeepCopy() *ClusterRoleScopeRestriction {
+	if in == nil {
+		return nil
+	}
+	out := new(ClusterRoleScopeRestriction)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *OAuthAccessToken) DeepCopyInto(out *OAuthAccessToken) {
+	*out = *in
+	out.TypeMeta = in.TypeMeta
+	in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
+	if in.Scopes != nil {
+		in, out := &in.Scopes, &out.Scopes
+		*out = make([]string, len(*in))
+		copy(*out, *in)
+	}
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OAuthAccessToken.
+func (in *OAuthAccessToken) DeepCopy() *OAuthAccessToken {
+	if in == nil {
+		return nil
+	}
+	out := new(OAuthAccessToken)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
+func (in *OAuthAccessToken) DeepCopyObject() runtime.Object {
+	if c := in.DeepCopy(); c != nil {
+		return c
+	}
+	return nil
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *OAuthAccessTokenList) DeepCopyInto(out *OAuthAccessTokenList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]OAuthAccessToken, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OAuthAccessTokenList. +func (in *OAuthAccessTokenList) DeepCopy() *OAuthAccessTokenList { + if in == nil { + return nil + } + out := new(OAuthAccessTokenList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *OAuthAccessTokenList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *OAuthAuthorizeToken) DeepCopyInto(out *OAuthAuthorizeToken) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + if in.Scopes != nil { + in, out := &in.Scopes, &out.Scopes + *out = make([]string, len(*in)) + copy(*out, *in) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OAuthAuthorizeToken. +func (in *OAuthAuthorizeToken) DeepCopy() *OAuthAuthorizeToken { + if in == nil { + return nil + } + out := new(OAuthAuthorizeToken) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *OAuthAuthorizeToken) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *OAuthAuthorizeTokenList) DeepCopyInto(out *OAuthAuthorizeTokenList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]OAuthAuthorizeToken, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OAuthAuthorizeTokenList. +func (in *OAuthAuthorizeTokenList) DeepCopy() *OAuthAuthorizeTokenList { + if in == nil { + return nil + } + out := new(OAuthAuthorizeTokenList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *OAuthAuthorizeTokenList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *OAuthClient) DeepCopyInto(out *OAuthClient) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + if in.AdditionalSecrets != nil { + in, out := &in.AdditionalSecrets, &out.AdditionalSecrets + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.RedirectURIs != nil { + in, out := &in.RedirectURIs, &out.RedirectURIs + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.ScopeRestrictions != nil { + in, out := &in.ScopeRestrictions, &out.ScopeRestrictions + *out = make([]ScopeRestriction, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.AccessTokenMaxAgeSeconds != nil { + in, out := &in.AccessTokenMaxAgeSeconds, &out.AccessTokenMaxAgeSeconds + *out = new(int32) + **out = **in + } + if in.AccessTokenInactivityTimeoutSeconds != nil { + in, out := &in.AccessTokenInactivityTimeoutSeconds, &out.AccessTokenInactivityTimeoutSeconds + *out = new(int32) + **out = **in + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OAuthClient. +func (in *OAuthClient) DeepCopy() *OAuthClient { + if in == nil { + return nil + } + out := new(OAuthClient) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *OAuthClient) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *OAuthClientAuthorization) DeepCopyInto(out *OAuthClientAuthorization) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + if in.Scopes != nil { + in, out := &in.Scopes, &out.Scopes + *out = make([]string, len(*in)) + copy(*out, *in) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OAuthClientAuthorization. +func (in *OAuthClientAuthorization) DeepCopy() *OAuthClientAuthorization { + if in == nil { + return nil + } + out := new(OAuthClientAuthorization) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *OAuthClientAuthorization) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *OAuthClientAuthorizationList) DeepCopyInto(out *OAuthClientAuthorizationList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]OAuthClientAuthorization, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OAuthClientAuthorizationList. +func (in *OAuthClientAuthorizationList) DeepCopy() *OAuthClientAuthorizationList { + if in == nil { + return nil + } + out := new(OAuthClientAuthorizationList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. 
+func (in *OAuthClientAuthorizationList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *OAuthClientList) DeepCopyInto(out *OAuthClientList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]OAuthClient, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OAuthClientList. +func (in *OAuthClientList) DeepCopy() *OAuthClientList { + if in == nil { + return nil + } + out := new(OAuthClientList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *OAuthClientList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *OAuthRedirectReference) DeepCopyInto(out *OAuthRedirectReference) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + out.Reference = in.Reference + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OAuthRedirectReference. +func (in *OAuthRedirectReference) DeepCopy() *OAuthRedirectReference { + if in == nil { + return nil + } + out := new(OAuthRedirectReference) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *OAuthRedirectReference) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *RedirectReference) DeepCopyInto(out *RedirectReference) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RedirectReference. +func (in *RedirectReference) DeepCopy() *RedirectReference { + if in == nil { + return nil + } + out := new(RedirectReference) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ScopeRestriction) DeepCopyInto(out *ScopeRestriction) { + *out = *in + if in.ExactValues != nil { + in, out := &in.ExactValues, &out.ExactValues + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.ClusterRole != nil { + in, out := &in.ClusterRole, &out.ClusterRole + *out = new(ClusterRoleScopeRestriction) + (*in).DeepCopyInto(*out) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ScopeRestriction. 
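The pattern in the functions above is consistent: slice fields get fresh backing arrays and pointer fields get newly allocated pointees, so a copy shares no mutable state with its source. A quick illustration with hypothetical values (not taken from the source):

    original := &OAuthClient{RedirectURIs: []string{"https://app.example.com/callback"}}
    clone := original.DeepCopy()
    clone.RedirectURIs[0] = "https://other.example.com" // original is unaffected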
+func (in *ScopeRestriction) DeepCopy() *ScopeRestriction { + if in == nil { + return nil + } + out := new(ScopeRestriction) + in.DeepCopyInto(out) + return out +} diff --git a/vendor/github.com/openshift/api/oauth/v1/zz_generated.swagger_doc_generated.go b/vendor/github.com/openshift/api/oauth/v1/zz_generated.swagger_doc_generated.go new file mode 100644 index 0000000000000000000000000000000000000000..84ddf18ecd91341b8a1c36b8c8f89a63af393a32 --- /dev/null +++ b/vendor/github.com/openshift/api/oauth/v1/zz_generated.swagger_doc_generated.go @@ -0,0 +1,153 @@ +package v1 + +// This file contains a collection of methods that can be used from go-restful to +// generate Swagger API documentation for its models. Please read this PR for more +// information on the implementation: https://github.com/emicklei/go-restful/pull/215 +// +// TODOs are ignored from the parser (e.g. TODO(andronat):... || TODO:...) if and only if +// they are on one line! For multiple line or blocks that you want to ignore use ---. +// Any context after a --- is ignored. +// +// Those methods can be generated by using hack/update-swagger-docs.sh + +// AUTO-GENERATED FUNCTIONS START HERE +var map_ClusterRoleScopeRestriction = map[string]string{ + "": "ClusterRoleScopeRestriction describes restrictions on cluster role scopes", + "roleNames": "RoleNames is the list of cluster roles that can be referenced. * means anything", + "namespaces": "Namespaces is the list of namespaces that can be referenced. * means any of them (including *)", + "allowEscalation": "AllowEscalation indicates whether you can request roles and their escalating resources", +} + +func (ClusterRoleScopeRestriction) SwaggerDoc() map[string]string { + return map_ClusterRoleScopeRestriction +} + +var map_OAuthAccessToken = map[string]string{ + "": "OAuthAccessToken describes an OAuth access token", + "clientName": "ClientName references the client that created this token.", + "expiresIn": "ExpiresIn is the seconds from CreationTime before this token expires.", + "scopes": "Scopes is an array of the requested scopes.", + "redirectURI": "RedirectURI is the redirection associated with the token.", + "userName": "UserName is the user name associated with this token", + "userUID": "UserUID is the unique UID associated with this token", + "authorizeToken": "AuthorizeToken contains the token that authorized this token", + "refreshToken": "RefreshToken is the value by which this token can be renewed. Can be blank.", + "inactivityTimeoutSeconds": "InactivityTimeoutSeconds is the value in seconds, from the CreationTimestamp, after which this token can no longer be used.
The value is automatically incremented when the token is used.", +} + +func (OAuthAccessToken) SwaggerDoc() map[string]string { + return map_OAuthAccessToken +} + +var map_OAuthAccessTokenList = map[string]string{ + "": "OAuthAccessTokenList is a collection of OAuth access tokens", + "items": "Items is the list of OAuth access tokens", +} + +func (OAuthAccessTokenList) SwaggerDoc() map[string]string { + return map_OAuthAccessTokenList +} + +var map_OAuthAuthorizeToken = map[string]string{ + "": "OAuthAuthorizeToken describes an OAuth authorization token", + "clientName": "ClientName references the client that created this token.", + "expiresIn": "ExpiresIn is the seconds from CreationTime before this token expires.", + "scopes": "Scopes is an array of the requested scopes.", + "redirectURI": "RedirectURI is the redirection associated with the token.", + "state": "State data from request", + "userName": "UserName is the user name associated with this token", + "userUID": "UserUID is the unique UID associated with this token. UserUID and UserName must both match for this token to be valid.", + "codeChallenge": "CodeChallenge is the optional code_challenge associated with this authorization code, as described in rfc7636", + "codeChallengeMethod": "CodeChallengeMethod is the optional code_challenge_method associated with this authorization code, as described in rfc7636", +} + +func (OAuthAuthorizeToken) SwaggerDoc() map[string]string { + return map_OAuthAuthorizeToken +} + +var map_OAuthAuthorizeTokenList = map[string]string{ + "": "OAuthAuthorizeTokenList is a collection of OAuth authorization tokens", + "items": "Items is the list of OAuth authorization tokens", +} + +func (OAuthAuthorizeTokenList) SwaggerDoc() map[string]string { + return map_OAuthAuthorizeTokenList +} + +var map_OAuthClient = map[string]string{ + "": "OAuthClient describes an OAuth client", + "secret": "Secret is the unique secret associated with a client", + "additionalSecrets": "AdditionalSecrets holds other secrets that may be used to identify the client. This is useful for rotation and for service account token validation", + "respondWithChallenges": "RespondWithChallenges indicates whether the client wants authentication needed responses made in the form of challenges instead of redirects", + "redirectURIs": "RedirectURIs is the valid redirection URIs associated with a client", + "grantMethod": "GrantMethod is a required field which determines how to handle grants for this client. Valid grant handling methods are:\n - auto: always approves grant requests, useful for trusted clients\n - prompt: prompts the end user for approval of grant requests, useful for third-party clients", + "scopeRestrictions": "ScopeRestrictions describes which scopes this client can request. Each requested scope is checked against each restriction. If any restriction matches, then the scope is allowed. If no restriction matches, then the scope is denied.", + "accessTokenMaxAgeSeconds": "AccessTokenMaxAgeSeconds overrides the default access token max age for tokens granted to this client. 0 means no expiration.", + "accessTokenInactivityTimeoutSeconds": "AccessTokenInactivityTimeoutSeconds overrides the default token inactivity timeout for tokens granted to this client. The value represents the maximum amount of time that can occur between consecutive uses of the token. Tokens become invalid if they are not used within this temporal window. The user will need to acquire a new token to regain access once a token times out. 
This value needs to be set only if the default set in configuration is not appropriate for this client. Valid values are: - 0: Tokens for this client never time out - X: Tokens time out if there is no activity for X seconds The current minimum allowed value for X is 300 (5 minutes)", +} + +func (OAuthClient) SwaggerDoc() map[string]string { + return map_OAuthClient +} + +var map_OAuthClientAuthorization = map[string]string{ + "": "OAuthClientAuthorization describes an authorization created by an OAuth client", + "clientName": "ClientName references the client that created this authorization", + "userName": "UserName is the user name that authorized this client", + "userUID": "UserUID is the unique UID associated with this authorization. UserUID and UserName must both match for this authorization to be valid.", + "scopes": "Scopes is an array of the granted scopes.", +} + +func (OAuthClientAuthorization) SwaggerDoc() map[string]string { + return map_OAuthClientAuthorization +} + +var map_OAuthClientAuthorizationList = map[string]string{ + "": "OAuthClientAuthorizationList is a collection of OAuth client authorizations", + "items": "Items is the list of OAuth client authorizations", +} + +func (OAuthClientAuthorizationList) SwaggerDoc() map[string]string { + return map_OAuthClientAuthorizationList +} + +var map_OAuthClientList = map[string]string{ + "": "OAuthClientList is a collection of OAuth clients", + "items": "Items is the list of OAuth clients", +} + +func (OAuthClientList) SwaggerDoc() map[string]string { + return map_OAuthClientList +} + +var map_OAuthRedirectReference = map[string]string{ + "": "OAuthRedirectReference is a reference to an OAuth redirect object.", + "reference": "The reference to a redirect object in the current namespace.", +} + +func (OAuthRedirectReference) SwaggerDoc() map[string]string { + return map_OAuthRedirectReference +} + +var map_RedirectReference = map[string]string{ + "": "RedirectReference specifies the target in the current namespace that resolves into redirect URIs. Only the 'Route' kind is currently allowed.", + "group": "The group of the target that is being referred to.", + "kind": "The kind of the target that is being referred to. Currently, only 'Route' is allowed.", + "name": "The name of the target that is being referred to. e.g. name of the Route.", +} + +func (RedirectReference) SwaggerDoc() map[string]string { + return map_RedirectReference +} + +var map_ScopeRestriction = map[string]string{ + "": "ScopeRestriction describes one restriction on scopes.
Exactly one option must be non-nil.", + "literals": "ExactValues means the scope has to match a particular set of strings exactly", + "clusterRole": "ClusterRole describes a set of restrictions for cluster role scoping.", +} + +func (ScopeRestriction) SwaggerDoc() map[string]string { + return map_ScopeRestriction +} + +// AUTO-GENERATED FUNCTIONS END HERE diff --git a/vendor/github.com/openshift/api/openshiftcontrolplane/install.go b/vendor/github.com/openshift/api/openshiftcontrolplane/install.go new file mode 100644 index 0000000000000000000000000000000000000000..5c745fd7ffab35bd6d2dbb340f60ae56f8e5fe2f --- /dev/null +++ b/vendor/github.com/openshift/api/openshiftcontrolplane/install.go @@ -0,0 +1,26 @@ +package openshiftcontrolplane + +import ( + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" + + openshiftcontrolplanev1 "github.com/openshift/api/openshiftcontrolplane/v1" +) + +const ( + GroupName = "openshiftcontrolplane.config.openshift.io" +) + +var ( + schemeBuilder = runtime.NewSchemeBuilder(openshiftcontrolplanev1.Install) + // Install is a function which adds every version of this group to a scheme + Install = schemeBuilder.AddToScheme +) + +func Resource(resource string) schema.GroupResource { + return schema.GroupResource{Group: GroupName, Resource: resource} +} + +func Kind(kind string) schema.GroupKind { + return schema.GroupKind{Group: GroupName, Kind: kind} +} diff --git a/vendor/github.com/openshift/api/openshiftcontrolplane/v1/doc.go b/vendor/github.com/openshift/api/openshiftcontrolplane/v1/doc.go new file mode 100644 index 0000000000000000000000000000000000000000..4528e3c4a68346a4410f5a80913ede958dfd9086 --- /dev/null +++ b/vendor/github.com/openshift/api/openshiftcontrolplane/v1/doc.go @@ -0,0 +1,7 @@ +// +k8s:deepcopy-gen=package,register +// +k8s:defaulter-gen=TypeMeta +// +k8s:openapi-gen=true + +// +groupName=openshiftcontrolplane.config.openshift.io +// Package v1 is the v1 version of the API. +package v1 diff --git a/vendor/github.com/openshift/api/openshiftcontrolplane/v1/register.go b/vendor/github.com/openshift/api/openshiftcontrolplane/v1/register.go new file mode 100644 index 0000000000000000000000000000000000000000..3d0bb20f220b34f687c86b49aff5d6721611383f --- /dev/null +++ b/vendor/github.com/openshift/api/openshiftcontrolplane/v1/register.go @@ -0,0 +1,40 @@ +package v1 + +import ( + configv1 "github.com/openshift/api/config/v1" + osinv1 "github.com/openshift/api/osin/v1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" +) + +var ( + GroupName = "openshiftcontrolplane.config.openshift.io" + GroupVersion = schema.GroupVersion{Group: GroupName, Version: "v1"} + schemeBuilder = runtime.NewSchemeBuilder(addKnownTypes, osinv1.Install, configv1.Install) + // Install is a function which adds this version to a scheme + Install = schemeBuilder.AddToScheme + + // SchemeGroupVersion generated code relies on this name + // Deprecated + SchemeGroupVersion = GroupVersion + // AddToScheme exists solely to keep the old generators creating valid code + // DEPRECATED + AddToScheme = schemeBuilder.AddToScheme +) + +// Resource generated code relies on this being here, but it logically belongs to the group +// DEPRECATED +func Resource(resource string) schema.GroupResource { + return schema.GroupResource{Group: GroupName, Resource: resource} +} + +// Adds the list of known types to api.Scheme. 
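For orientation, this is roughly how a consumer is expected to use the Install function from register.go above: register the group into a fresh scheme, then decode serialized config through a codec factory. A sketch assuming the standard apimachinery serializer APIs (decodeConfig and raw are illustrative names, not part of the vendored code):

    import (
        "fmt"

        "k8s.io/apimachinery/pkg/runtime"
        "k8s.io/apimachinery/pkg/runtime/serializer"
    )

    func decodeConfig(raw []byte) (*OpenShiftAPIServerConfig, error) {
        scheme := runtime.NewScheme()
        if err := Install(scheme); err != nil {
            return nil, err
        }
        codecs := serializer.NewCodecFactory(scheme)
        // UniversalDeserializer recognizes JSON and YAML payloads.
        obj, _, err := codecs.UniversalDeserializer().Decode(raw, nil, nil)
        if err != nil {
            return nil, err
        }
        cfg, ok := obj.(*OpenShiftAPIServerConfig)
        if !ok {
            return nil, fmt.Errorf("unexpected type %T", obj)
        }
        return cfg, nil
    }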
+func addKnownTypes(scheme *runtime.Scheme) error { + scheme.AddKnownTypes(GroupVersion, + &OpenShiftAPIServerConfig{}, + &OpenShiftControllerManagerConfig{}, + &BuildDefaultsConfig{}, + &BuildOverridesConfig{}, + ) + return nil +} diff --git a/vendor/github.com/openshift/api/openshiftcontrolplane/v1/types.go b/vendor/github.com/openshift/api/openshiftcontrolplane/v1/types.go new file mode 100644 index 0000000000000000000000000000000000000000..dddb4cfb89018fb02e369a12adca6c6400c11eac --- /dev/null +++ b/vendor/github.com/openshift/api/openshiftcontrolplane/v1/types.go @@ -0,0 +1,370 @@ +package v1 + +import ( + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + + buildv1 "github.com/openshift/api/build/v1" + configv1 "github.com/openshift/api/config/v1" +) + +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +type OpenShiftAPIServerConfig struct { + metav1.TypeMeta `json:",inline"` + + // provides the standard apiserver configuration + configv1.GenericAPIServerConfig `json:",inline"` + + // aggregatorConfig contains information about how to verify the aggregator front proxy + AggregatorConfig FrontProxyConfig `json:"aggregatorConfig"` + + // imagePolicyConfig feeds the image policy admission plugin + ImagePolicyConfig ImagePolicyConfig `json:"imagePolicyConfig"` + + // projectConfig feeds an admission plugin + ProjectConfig ProjectConfig `json:"projectConfig"` + + // routingConfig holds information about routing and route generation + RoutingConfig RoutingConfig `json:"routingConfig"` + + // serviceAccountOAuthGrantMethod is used for determining client authorization for service account oauth client. + // It must be either: deny, prompt, or "" + ServiceAccountOAuthGrantMethod GrantHandlerType `json:"serviceAccountOAuthGrantMethod"` + + // jenkinsPipelineConfig holds information about the default Jenkins template + // used for JenkinsPipeline build strategy. + // TODO this needs to become a normal plugin config + JenkinsPipelineConfig JenkinsPipelineConfig `json:"jenkinsPipelineConfig"` + + // cloudProviderFile points to the cloud config file + // TODO this needs to become a normal plugin config + CloudProviderFile string `json:"cloudProviderFile"` + + // TODO this needs to be removed. + APIServerArguments map[string][]string `json:"apiServerArguments"` +} + +type FrontProxyConfig struct { + // clientCA is a path to the CA bundle to use to verify the common name of the front proxy's client cert + ClientCA string `json:"clientCA"` + // allowedNames is an optional list of common names to require a match from. 
+ AllowedNames []string `json:"allowedNames"` + + // usernameHeaders is the set of headers to check for the username + UsernameHeaders []string `json:"usernameHeaders"` + // groupHeaders is the set of headers to check for groups + GroupHeaders []string `json:"groupHeaders"` + // extraHeaderPrefixes is the set of header prefixes to check for user extra + ExtraHeaderPrefixes []string `json:"extraHeaderPrefixes"` +} + +type GrantHandlerType string + +const ( + // GrantHandlerAuto auto-approves client authorization grant requests + GrantHandlerAuto GrantHandlerType = "auto" + // GrantHandlerPrompt prompts the user to approve new client authorization grant requests + GrantHandlerPrompt GrantHandlerType = "prompt" + // GrantHandlerDeny auto-denies client authorization grant requests + GrantHandlerDeny GrantHandlerType = "deny" +) + +// RoutingConfig holds the necessary configuration options for routing to subdomains +type RoutingConfig struct { + // subdomain is the suffix appended to $service.$namespace. to form the default route hostname + // DEPRECATED: This field is being replaced by routers setting their own defaults. This is the + // "default" route. + Subdomain string `json:"subdomain"` +} + +type ImagePolicyConfig struct { + // maxImagesBulkImportedPerRepository controls the number of images that are imported when a user + // does a bulk import of a container repository. This number is set low to prevent users from + // importing large numbers of images accidentally. Set -1 for no limit. + MaxImagesBulkImportedPerRepository int `json:"maxImagesBulkImportedPerRepository"` + // allowedRegistriesForImport limits the container image registries that normal users may import + // images from. Set this list to the registries that you trust to contain valid Docker + // images and that you want applications to be able to import from. Users with + // permission to create Images or ImageStreamMappings via the API are not affected by + // this policy - typically only administrators or system integrations will have those + // permissions. + AllowedRegistriesForImport AllowedRegistries `json:"allowedRegistriesForImport"` + + // internalRegistryHostname sets the hostname for the default internal image + // registry. The value must be in "hostname[:port]" format. + // For backward compatibility, users can still use OPENSHIFT_DEFAULT_REGISTRY + // environment variable but this setting overrides the environment variable. + InternalRegistryHostname string `json:"internalRegistryHostname"` + // externalRegistryHostnames provides the hostnames for the default external image + // registry. The external hostname should be set only when the image registry + // is exposed externally. The first value is used in 'publicDockerImageRepository' + // field in ImageStreams. The value must be in "hostname[:port]" format. + ExternalRegistryHostnames []string `json:"externalRegistryHostnames"` + + // additionalTrustedCA is a path to a pem bundle file containing additional CAs that + // should be trusted during imagestream import. + AdditionalTrustedCA string `json:"additionalTrustedCA"` +} + +// AllowedRegistries represents a list of registries allowed for the image import. +type AllowedRegistries []RegistryLocation + +// RegistryLocation contains a location of the registry specified by the registry domain +// name. The domain name might include wildcards, like '*' or '??'. 
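The wildcard forms just mentioned ('*', '??') line up with the metacharacters of Go's path.Match, so an allowlist check over these entries can be sketched as follows (illustrative only; it ignores the Insecure flag, which real admission logic would also consult):

    import "path"

    // matchesAllowed reports whether a registry host such as
    // "registry.example.com:5000" matches any allowed pattern,
    // e.g. "*.example.com:5000" or "localhost:50??".
    func matchesAllowed(allowed AllowedRegistries, host string) bool {
        for _, loc := range allowed {
            if ok, err := path.Match(loc.DomainName, host); err == nil && ok {
                return true
            }
        }
        return false
    }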
+type RegistryLocation struct { + // DomainName specifies a domain name for the registry + // If the registry uses a non-standard port (i.e. not 80 or 443), the port should be included + // in the domain name as well. + DomainName string `json:"domainName"` + // Insecure indicates whether the registry is secure (https) or insecure (http) + // By default (if not specified) the registry is assumed to be secure. + Insecure bool `json:"insecure,omitempty"` +} + +type ProjectConfig struct { + // defaultNodeSelector holds default project node label selector + DefaultNodeSelector string `json:"defaultNodeSelector"` + + // projectRequestMessage is the string presented to a user if they are unable to request a project via the projectrequest api endpoint + ProjectRequestMessage string `json:"projectRequestMessage"` + + // projectRequestTemplate is the template to use for creating projects in response to projectrequest. + // It is in the format namespace/template and it is optional. + // If it is not specified, a default template is used. + ProjectRequestTemplate string `json:"projectRequestTemplate"` +} + +// JenkinsPipelineConfig holds configuration for the Jenkins pipeline strategy +type JenkinsPipelineConfig struct { + // autoProvisionEnabled determines whether a Jenkins server will be spawned from the provided + // template when the first build config in the project with type JenkinsPipeline + // is created. When not specified this option defaults to true. + AutoProvisionEnabled *bool `json:"autoProvisionEnabled"` + // templateNamespace contains the namespace name where the Jenkins template is stored + TemplateNamespace string `json:"templateNamespace"` + // templateName is the name of the default Jenkins template + TemplateName string `json:"templateName"` + // serviceName is the name of the Jenkins service OpenShift uses to detect + // whether a Jenkins pipeline handler has already been installed in a project. + // This value *must* match a service name in the provided template. + ServiceName string `json:"serviceName"` + // parameters specifies a set of optional parameters to the Jenkins template. + Parameters map[string]string `json:"parameters"` +} + +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +type OpenShiftControllerManagerConfig struct { + metav1.TypeMeta `json:",inline"` + + KubeClientConfig configv1.KubeClientConfig `json:"kubeClientConfig"` + + // servingInfo describes how to start serving + ServingInfo *configv1.HTTPServingInfo `json:"servingInfo"` + + // leaderElection defines the configuration for electing a controller instance to make changes to + // the cluster. If unspecified, the ControllerTTL value is checked to determine whether the + // legacy direct etcd election code will be used. + LeaderElection configv1.LeaderElection `json:"leaderElection"` + + // controllers is a list of controllers to enable. '*' enables all on-by-default controllers, 'foo' enables the controller + // named 'foo', '-foo' disables the controller named 'foo'. + // Defaults to "*".
+ Controllers []string `json:"controllers"` + + ResourceQuota ResourceQuotaControllerConfig `json:"resourceQuota"` + ServiceServingCert ServiceServingCert `json:"serviceServingCert"` + Deployer DeployerControllerConfig `json:"deployer"` + Build BuildControllerConfig `json:"build"` + ServiceAccount ServiceAccountControllerConfig `json:"serviceAccount"` + DockerPullSecret DockerPullSecretControllerConfig `json:"dockerPullSecret"` + Network NetworkControllerConfig `json:"network"` + Ingress IngressControllerConfig `json:"ingress"` + ImageImport ImageImportControllerConfig `json:"imageImport"` + SecurityAllocator SecurityAllocator `json:"securityAllocator"` +} + +type DeployerControllerConfig struct { + ImageTemplateFormat ImageConfig `json:"imageTemplateFormat"` +} + +type BuildControllerConfig struct { + ImageTemplateFormat ImageConfig `json:"imageTemplateFormat"` + + BuildDefaults *BuildDefaultsConfig `json:"buildDefaults"` + BuildOverrides *BuildOverridesConfig `json:"buildOverrides"` + + // additionalTrustedCA is a path to a pem bundle file containing additional CAs that + // should be trusted for image pushes and pulls during builds. + AdditionalTrustedCA string `json:"additionalTrustedCA"` +} + +type ResourceQuotaControllerConfig struct { + ConcurrentSyncs int32 `json:"concurrentSyncs"` + SyncPeriod metav1.Duration `json:"syncPeriod"` + MinResyncPeriod metav1.Duration `json:"minResyncPeriod"` +} + +type IngressControllerConfig struct { + // ingressIPNetworkCIDR controls the range to assign ingress ips from for services of type LoadBalancer on bare + // metal. If empty, ingress ips will not be assigned. It may contain a single CIDR that will be allocated from. + // For security reasons, you should ensure that this range does not overlap with the CIDRs reserved for external ips, + // nodes, pods, or services. + IngressIPNetworkCIDR string `json:"ingressIPNetworkCIDR"` +} + +// MasterNetworkConfig to be passed to the compiled in network plugin +type NetworkControllerConfig struct { + NetworkPluginName string `json:"networkPluginName"` + // clusterNetworks contains a list of cluster networks that defines the global overlay networks L3 space. + ClusterNetworks []ClusterNetworkEntry `json:"clusterNetworks"` + ServiceNetworkCIDR string `json:"serviceNetworkCIDR"` + VXLANPort uint32 `json:"vxlanPort"` +} + +type ServiceAccountControllerConfig struct { + // managedNames is a list of service account names that will be auto-created in every namespace. + // If no names are specified, the ServiceAccountsController will not be started. + ManagedNames []string `json:"managedNames"` +} + +type DockerPullSecretControllerConfig struct { + // registryURLs is a list of urls that the docker pull secrets should be valid for. + RegistryURLs []string `json:"registryURLs"` + + // internalRegistryHostname is the hostname for the default internal image + // registry. The value must be in "hostname[:port]" format. Docker pull secrets + // will be generated for this registry. + InternalRegistryHostname string `json:"internalRegistryHostname"` +} + +type ImageImportControllerConfig struct { + // maxScheduledImageImportsPerMinute is the maximum number of image streams that will be imported in the background per minute. + // The default value is 60. Set to -1 for unlimited. + MaxScheduledImageImportsPerMinute int `json:"maxScheduledImageImportsPerMinute"` + // disableScheduledImport allows scheduled background import of images to be disabled. 
+ DisableScheduledImport bool `json:"disableScheduledImport"` + // scheduledImageImportMinimumIntervalSeconds is the minimum number of seconds that can elapse between when image streams + // scheduled for background import are checked against the upstream repository. The default value is 15 minutes. + ScheduledImageImportMinimumIntervalSeconds int `json:"scheduledImageImportMinimumIntervalSeconds"` +} + +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// BuildDefaultsConfig controls the default information for Builds +type BuildDefaultsConfig struct { + metav1.TypeMeta `json:",inline"` + + // gitHTTPProxy is the location of the HTTPProxy for Git source + GitHTTPProxy string `json:"gitHTTPProxy,omitempty"` + + // gitHTTPSProxy is the location of the HTTPSProxy for Git source + GitHTTPSProxy string `json:"gitHTTPSProxy,omitempty"` + + // gitNoProxy is the list of domains for which the proxy should not be used + GitNoProxy string `json:"gitNoProxy,omitempty"` + + // env is a set of default environment variables that will be applied to the + // build if the specified variables do not exist on the build + Env []corev1.EnvVar `json:"env,omitempty"` + + // sourceStrategyDefaults are default values that apply to builds using the + // source strategy. + SourceStrategyDefaults *SourceStrategyDefaultsConfig `json:"sourceStrategyDefaults,omitempty"` + + // imageLabels is a list of labels that are applied to the resulting image. + // User can override a default label by providing a label with the same name in their + // Build/BuildConfig. + ImageLabels []buildv1.ImageLabel `json:"imageLabels,omitempty"` + + // nodeSelector is a selector which must be true for the build pod to fit on a node + NodeSelector map[string]string `json:"nodeSelector,omitempty"` + + // annotations are annotations that will be added to the build pod + Annotations map[string]string `json:"annotations,omitempty"` + + // resources defines resource requirements to execute the build. + Resources corev1.ResourceRequirements `json:"resources,omitempty"` +} + +// SourceStrategyDefaultsConfig contains values that apply to builds using the +// source strategy. +type SourceStrategyDefaultsConfig struct { + + // incremental indicates if s2i build strategies should perform an incremental + // build or not + Incremental *bool `json:"incremental,omitempty"` +} + +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// BuildOverridesConfig controls override settings for builds +type BuildOverridesConfig struct { + metav1.TypeMeta `json:",inline"` + + // forcePull indicates whether the build strategy should always be set to ForcePull=true + ForcePull bool `json:"forcePull"` + + // imageLabels is a list of labels that are applied to the resulting image. + // If user provided a label in their Build/BuildConfig with the same name as one in this + // list, the user's label will be overwritten. + ImageLabels []buildv1.ImageLabel `json:"imageLabels,omitempty"` + + // nodeSelector is a selector which must be true for the build pod to fit on a node + NodeSelector map[string]string `json:"nodeSelector,omitempty"` + + // annotations are annotations that will be added to the build pod + Annotations map[string]string `json:"annotations,omitempty"` + + // tolerations is a list of Tolerations that will override any existing + // tolerations set on a build pod. 
+ Tolerations []corev1.Toleration `json:"tolerations,omitempty"` +} + +// ImageConfig holds the necessary configuration options for building image names for system components +type ImageConfig struct { + // Format is the format of the name to be built for the system component + Format string `json:"format"` + // Latest determines if the latest tag will be pulled from the registry + Latest bool `json:"latest"` +} + +// ServiceServingCert holds configuration for service serving cert signer which creates cert/key pairs for +// pods fulfilling a service to serve with. +type ServiceServingCert struct { + // Signer holds the signing information used to automatically sign serving certificates. + // If this value is nil, then certs are not signed automatically. + Signer *configv1.CertInfo `json:"signer"` +} + +// ClusterNetworkEntry defines an individual cluster network. The CIDRs cannot overlap with other cluster network CIDRs, CIDRs reserved for external ips, CIDRs reserved for service networks, and CIDRs reserved for ingress ips. +type ClusterNetworkEntry struct { + // CIDR defines the total range of a cluster networks address space. + CIDR string `json:"cidr"` + // HostSubnetLength is the number of bits of the accompanying CIDR address to allocate to each node. eg, 8 would mean that each node would have a /24 slice of the overlay network for its pod. + HostSubnetLength uint32 `json:"hostSubnetLength"` +} + +// SecurityAllocator controls the automatic allocation of UIDs and MCS labels to a project. If nil, allocation is disabled. +type SecurityAllocator struct { + // UIDAllocatorRange defines the total set of Unix user IDs (UIDs) that will be allocated to projects automatically, and the size of the + // block each namespace gets. For example, 1000-1999/10 will allocate ten UIDs per namespace, and will be able to allocate up to 100 blocks + // before running out of space. The default is to allocate from 1 billion to 2 billion in 10k blocks (which is the expected size of the + // ranges container images will use once user namespaces are started). + UIDAllocatorRange string `json:"uidAllocatorRange"` + // MCSAllocatorRange defines the range of MCS categories that will be assigned to namespaces. The format is + // "<prefix>/<numberOfLabels>[,<maxCategory>]". The default is "s0/2" and will allocate from c0 -> c1023, which means a total of 535k labels + // are available (1024 choose 2 ~ 535k). If this value is changed after startup, new projects may receive labels that are already allocated + // to other projects. Prefix may be any valid SELinux set of terms (including user, role, and type), although leaving them as the default + // will allow the server to set them automatically. + // + // Examples: + // * s0:/2 - Allocate labels from s0:c0,c0 to s0:c511,c511 + // * s0:/2,512 - Allocate labels from s0:c0,c0,c0 to s0:c511,c511,511 + // + MCSAllocatorRange string `json:"mcsAllocatorRange"` + // MCSLabelsPerProject defines the number of labels that should be reserved per project. The default is 5 to match the default UID and MCS + // ranges (100k namespaces, 535k/5 labels). 
+ MCSLabelsPerProject int `json:"mcsLabelsPerProject"` +} diff --git a/vendor/github.com/openshift/api/openshiftcontrolplane/v1/zz_generated.deepcopy.go b/vendor/github.com/openshift/api/openshiftcontrolplane/v1/zz_generated.deepcopy.go new file mode 100644 index 0000000000000000000000000000000000000000..9e8e2011e5a7645c30b1bac1f61cd3b9b8e23526 --- /dev/null +++ b/vendor/github.com/openshift/api/openshiftcontrolplane/v1/zz_generated.deepcopy.go @@ -0,0 +1,618 @@ +// +build !ignore_autogenerated + +// Code generated by deepcopy-gen. DO NOT EDIT. + +package v1 + +import ( + buildv1 "github.com/openshift/api/build/v1" + configv1 "github.com/openshift/api/config/v1" + corev1 "k8s.io/api/core/v1" + runtime "k8s.io/apimachinery/pkg/runtime" +) + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in AllowedRegistries) DeepCopyInto(out *AllowedRegistries) { + { + in := &in + *out = make(AllowedRegistries, len(*in)) + copy(*out, *in) + return + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AllowedRegistries. +func (in AllowedRegistries) DeepCopy() AllowedRegistries { + if in == nil { + return nil + } + out := new(AllowedRegistries) + in.DeepCopyInto(out) + return *out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *BuildControllerConfig) DeepCopyInto(out *BuildControllerConfig) { + *out = *in + out.ImageTemplateFormat = in.ImageTemplateFormat + if in.BuildDefaults != nil { + in, out := &in.BuildDefaults, &out.BuildDefaults + *out = new(BuildDefaultsConfig) + (*in).DeepCopyInto(*out) + } + if in.BuildOverrides != nil { + in, out := &in.BuildOverrides, &out.BuildOverrides + *out = new(BuildOverridesConfig) + (*in).DeepCopyInto(*out) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BuildControllerConfig. +func (in *BuildControllerConfig) DeepCopy() *BuildControllerConfig { + if in == nil { + return nil + } + out := new(BuildControllerConfig) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *BuildDefaultsConfig) DeepCopyInto(out *BuildDefaultsConfig) { + *out = *in + out.TypeMeta = in.TypeMeta + if in.Env != nil { + in, out := &in.Env, &out.Env + *out = make([]corev1.EnvVar, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.SourceStrategyDefaults != nil { + in, out := &in.SourceStrategyDefaults, &out.SourceStrategyDefaults + *out = new(SourceStrategyDefaultsConfig) + (*in).DeepCopyInto(*out) + } + if in.ImageLabels != nil { + in, out := &in.ImageLabels, &out.ImageLabels + *out = make([]buildv1.ImageLabel, len(*in)) + copy(*out, *in) + } + if in.NodeSelector != nil { + in, out := &in.NodeSelector, &out.NodeSelector + *out = make(map[string]string, len(*in)) + for key, val := range *in { + (*out)[key] = val + } + } + if in.Annotations != nil { + in, out := &in.Annotations, &out.Annotations + *out = make(map[string]string, len(*in)) + for key, val := range *in { + (*out)[key] = val + } + } + in.Resources.DeepCopyInto(&out.Resources) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BuildDefaultsConfig. 
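The uidAllocatorRange format documented on SecurityAllocator just above ("<min>-<max>/<blockSize>") is easy to sanity-check mechanically. A hypothetical parser, shown only to make the arithmetic concrete (the real allocator lives elsewhere in OpenShift):

    import "fmt"

    // uidBlocks returns how many namespace-sized blocks a range like
    // "1000-1999/10" yields: (1999-1000+1)/10 = 100 blocks of 10 UIDs each.
    func uidBlocks(spec string) (uint32, error) {
        var lo, hi, size uint32
        if _, err := fmt.Sscanf(spec, "%d-%d/%d", &lo, &hi, &size); err != nil {
            return 0, err
        }
        if size == 0 || hi < lo {
            return 0, fmt.Errorf("invalid uidAllocatorRange %q", spec)
        }
        return (hi - lo + 1) / size, nil
    }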
+func (in *BuildDefaultsConfig) DeepCopy() *BuildDefaultsConfig { + if in == nil { + return nil + } + out := new(BuildDefaultsConfig) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *BuildDefaultsConfig) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *BuildOverridesConfig) DeepCopyInto(out *BuildOverridesConfig) { + *out = *in + out.TypeMeta = in.TypeMeta + if in.ImageLabels != nil { + in, out := &in.ImageLabels, &out.ImageLabels + *out = make([]buildv1.ImageLabel, len(*in)) + copy(*out, *in) + } + if in.NodeSelector != nil { + in, out := &in.NodeSelector, &out.NodeSelector + *out = make(map[string]string, len(*in)) + for key, val := range *in { + (*out)[key] = val + } + } + if in.Annotations != nil { + in, out := &in.Annotations, &out.Annotations + *out = make(map[string]string, len(*in)) + for key, val := range *in { + (*out)[key] = val + } + } + if in.Tolerations != nil { + in, out := &in.Tolerations, &out.Tolerations + *out = make([]corev1.Toleration, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BuildOverridesConfig. +func (in *BuildOverridesConfig) DeepCopy() *BuildOverridesConfig { + if in == nil { + return nil + } + out := new(BuildOverridesConfig) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *BuildOverridesConfig) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ClusterNetworkEntry) DeepCopyInto(out *ClusterNetworkEntry) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClusterNetworkEntry. +func (in *ClusterNetworkEntry) DeepCopy() *ClusterNetworkEntry { + if in == nil { + return nil + } + out := new(ClusterNetworkEntry) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DeployerControllerConfig) DeepCopyInto(out *DeployerControllerConfig) { + *out = *in + out.ImageTemplateFormat = in.ImageTemplateFormat + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DeployerControllerConfig. +func (in *DeployerControllerConfig) DeepCopy() *DeployerControllerConfig { + if in == nil { + return nil + } + out := new(DeployerControllerConfig) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DockerPullSecretControllerConfig) DeepCopyInto(out *DockerPullSecretControllerConfig) { + *out = *in + if in.RegistryURLs != nil { + in, out := &in.RegistryURLs, &out.RegistryURLs + *out = make([]string, len(*in)) + copy(*out, *in) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DockerPullSecretControllerConfig. 
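ClusterNetworkEntry (copied above) pairs a CIDR with hostSubnetLength, and the doc comment's example (8 bits giving each node a /24) generalizes to simple arithmetic. A sketch with illustrative numbers, not the shipped validation logic:

    import "net"

    // nodeCapacity returns (pods per node, max nodes) for a cluster network,
    // e.g. cidr "10.128.0.0/14" with hostSubnetLength 9 -> 512 pods, 512 nodes.
    func nodeCapacity(cidr string, hostSubnetLength uint32) (int, int, error) {
        _, ipnet, err := net.ParseCIDR(cidr)
        if err != nil {
            return 0, 0, err
        }
        ones, bits := ipnet.Mask.Size()            // e.g. 14, 32
        nodePrefix := bits - int(hostSubnetLength) // each node gets a /23 here
        return 1 << hostSubnetLength, 1 << (nodePrefix - ones), nil
    }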
+func (in *DockerPullSecretControllerConfig) DeepCopy() *DockerPullSecretControllerConfig { + if in == nil { + return nil + } + out := new(DockerPullSecretControllerConfig) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *FrontProxyConfig) DeepCopyInto(out *FrontProxyConfig) { + *out = *in + if in.AllowedNames != nil { + in, out := &in.AllowedNames, &out.AllowedNames + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.UsernameHeaders != nil { + in, out := &in.UsernameHeaders, &out.UsernameHeaders + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.GroupHeaders != nil { + in, out := &in.GroupHeaders, &out.GroupHeaders + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.ExtraHeaderPrefixes != nil { + in, out := &in.ExtraHeaderPrefixes, &out.ExtraHeaderPrefixes + *out = make([]string, len(*in)) + copy(*out, *in) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FrontProxyConfig. +func (in *FrontProxyConfig) DeepCopy() *FrontProxyConfig { + if in == nil { + return nil + } + out := new(FrontProxyConfig) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ImageConfig) DeepCopyInto(out *ImageConfig) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ImageConfig. +func (in *ImageConfig) DeepCopy() *ImageConfig { + if in == nil { + return nil + } + out := new(ImageConfig) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ImageImportControllerConfig) DeepCopyInto(out *ImageImportControllerConfig) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ImageImportControllerConfig. +func (in *ImageImportControllerConfig) DeepCopy() *ImageImportControllerConfig { + if in == nil { + return nil + } + out := new(ImageImportControllerConfig) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ImagePolicyConfig) DeepCopyInto(out *ImagePolicyConfig) { + *out = *in + if in.AllowedRegistriesForImport != nil { + in, out := &in.AllowedRegistriesForImport, &out.AllowedRegistriesForImport + *out = make(AllowedRegistries, len(*in)) + copy(*out, *in) + } + if in.ExternalRegistryHostnames != nil { + in, out := &in.ExternalRegistryHostnames, &out.ExternalRegistryHostnames + *out = make([]string, len(*in)) + copy(*out, *in) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ImagePolicyConfig. +func (in *ImagePolicyConfig) DeepCopy() *ImagePolicyConfig { + if in == nil { + return nil + } + out := new(ImagePolicyConfig) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *IngressControllerConfig) DeepCopyInto(out *IngressControllerConfig) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IngressControllerConfig. 
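FrontProxyConfig, copied above, configures requestheader-style authentication; the usual convention is that the first non-empty username header wins and group headers are unioned. Sketched under that assumption (not the actual authenticator):

    import "net/http"

    func frontProxyIdentity(cfg *FrontProxyConfig, h http.Header) (user string, groups []string) {
        for _, key := range cfg.UsernameHeaders {
            if v := h.Get(key); v != "" {
                user = v // first non-empty header wins
                break
            }
        }
        for _, key := range cfg.GroupHeaders {
            groups = append(groups, h.Values(key)...) // union of all values
        }
        return user, groups
    }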
+func (in *IngressControllerConfig) DeepCopy() *IngressControllerConfig { + if in == nil { + return nil + } + out := new(IngressControllerConfig) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *JenkinsPipelineConfig) DeepCopyInto(out *JenkinsPipelineConfig) { + *out = *in + if in.AutoProvisionEnabled != nil { + in, out := &in.AutoProvisionEnabled, &out.AutoProvisionEnabled + *out = new(bool) + **out = **in + } + if in.Parameters != nil { + in, out := &in.Parameters, &out.Parameters + *out = make(map[string]string, len(*in)) + for key, val := range *in { + (*out)[key] = val + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new JenkinsPipelineConfig. +func (in *JenkinsPipelineConfig) DeepCopy() *JenkinsPipelineConfig { + if in == nil { + return nil + } + out := new(JenkinsPipelineConfig) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *NetworkControllerConfig) DeepCopyInto(out *NetworkControllerConfig) { + *out = *in + if in.ClusterNetworks != nil { + in, out := &in.ClusterNetworks, &out.ClusterNetworks + *out = make([]ClusterNetworkEntry, len(*in)) + copy(*out, *in) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NetworkControllerConfig. +func (in *NetworkControllerConfig) DeepCopy() *NetworkControllerConfig { + if in == nil { + return nil + } + out := new(NetworkControllerConfig) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *OpenShiftAPIServerConfig) DeepCopyInto(out *OpenShiftAPIServerConfig) { + *out = *in + out.TypeMeta = in.TypeMeta + in.GenericAPIServerConfig.DeepCopyInto(&out.GenericAPIServerConfig) + in.AggregatorConfig.DeepCopyInto(&out.AggregatorConfig) + in.ImagePolicyConfig.DeepCopyInto(&out.ImagePolicyConfig) + out.ProjectConfig = in.ProjectConfig + out.RoutingConfig = in.RoutingConfig + in.JenkinsPipelineConfig.DeepCopyInto(&out.JenkinsPipelineConfig) + if in.APIServerArguments != nil { + in, out := &in.APIServerArguments, &out.APIServerArguments + *out = make(map[string][]string, len(*in)) + for key, val := range *in { + var outVal []string + if val == nil { + (*out)[key] = nil + } else { + in, out := &val, &outVal + *out = make([]string, len(*in)) + copy(*out, *in) + } + (*out)[key] = outVal + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OpenShiftAPIServerConfig. +func (in *OpenShiftAPIServerConfig) DeepCopy() *OpenShiftAPIServerConfig { + if in == nil { + return nil + } + out := new(OpenShiftAPIServerConfig) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *OpenShiftAPIServerConfig) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
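JenkinsPipelineConfig.AutoProvisionEnabled is a *bool precisely so that "unset" can default to true, as its doc comment states; that is also why the generated copy above allocates a fresh bool rather than sharing the pointer. The matching accessor is a one-liner (illustrative, not part of the vendored code):

    func jenkinsAutoProvision(cfg *JenkinsPipelineConfig) bool {
        return cfg.AutoProvisionEnabled == nil || *cfg.AutoProvisionEnabled
    }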
+func (in *OpenShiftControllerManagerConfig) DeepCopyInto(out *OpenShiftControllerManagerConfig) { + *out = *in + out.TypeMeta = in.TypeMeta + out.KubeClientConfig = in.KubeClientConfig + if in.ServingInfo != nil { + in, out := &in.ServingInfo, &out.ServingInfo + *out = new(configv1.HTTPServingInfo) + (*in).DeepCopyInto(*out) + } + out.LeaderElection = in.LeaderElection + if in.Controllers != nil { + in, out := &in.Controllers, &out.Controllers + *out = make([]string, len(*in)) + copy(*out, *in) + } + out.ResourceQuota = in.ResourceQuota + in.ServiceServingCert.DeepCopyInto(&out.ServiceServingCert) + out.Deployer = in.Deployer + in.Build.DeepCopyInto(&out.Build) + in.ServiceAccount.DeepCopyInto(&out.ServiceAccount) + in.DockerPullSecret.DeepCopyInto(&out.DockerPullSecret) + in.Network.DeepCopyInto(&out.Network) + out.Ingress = in.Ingress + out.ImageImport = in.ImageImport + out.SecurityAllocator = in.SecurityAllocator + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OpenShiftControllerManagerConfig. +func (in *OpenShiftControllerManagerConfig) DeepCopy() *OpenShiftControllerManagerConfig { + if in == nil { + return nil + } + out := new(OpenShiftControllerManagerConfig) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *OpenShiftControllerManagerConfig) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ProjectConfig) DeepCopyInto(out *ProjectConfig) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ProjectConfig. +func (in *ProjectConfig) DeepCopy() *ProjectConfig { + if in == nil { + return nil + } + out := new(ProjectConfig) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *RegistryLocation) DeepCopyInto(out *RegistryLocation) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RegistryLocation. +func (in *RegistryLocation) DeepCopy() *RegistryLocation { + if in == nil { + return nil + } + out := new(RegistryLocation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ResourceQuotaControllerConfig) DeepCopyInto(out *ResourceQuotaControllerConfig) { + *out = *in + out.SyncPeriod = in.SyncPeriod + out.MinResyncPeriod = in.MinResyncPeriod + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ResourceQuotaControllerConfig. +func (in *ResourceQuotaControllerConfig) DeepCopy() *ResourceQuotaControllerConfig { + if in == nil { + return nil + } + out := new(ResourceQuotaControllerConfig) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *RoutingConfig) DeepCopyInto(out *RoutingConfig) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RoutingConfig. 
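The Controllers slice copied above uses the '*' / 'foo' / '-foo' convention documented on OpenShiftControllerManagerConfig. Interpreted the way kube-controller-manager treats its own --controllers flag, a lookup might read (a sketch, not the shipped implementation):

    func controllerEnabled(controllers []string, name string) bool {
        star := false
        for _, c := range controllers {
            switch c {
            case name:
                return true // explicitly enabled
            case "-" + name:
                return false // explicitly disabled
            case "*":
                star = true // defer to the on-by-default set
            }
        }
        return star
    }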
+func (in *RoutingConfig) DeepCopy() *RoutingConfig { + if in == nil { + return nil + } + out := new(RoutingConfig) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SecurityAllocator) DeepCopyInto(out *SecurityAllocator) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SecurityAllocator. +func (in *SecurityAllocator) DeepCopy() *SecurityAllocator { + if in == nil { + return nil + } + out := new(SecurityAllocator) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ServiceAccountControllerConfig) DeepCopyInto(out *ServiceAccountControllerConfig) { + *out = *in + if in.ManagedNames != nil { + in, out := &in.ManagedNames, &out.ManagedNames + *out = make([]string, len(*in)) + copy(*out, *in) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ServiceAccountControllerConfig. +func (in *ServiceAccountControllerConfig) DeepCopy() *ServiceAccountControllerConfig { + if in == nil { + return nil + } + out := new(ServiceAccountControllerConfig) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ServiceServingCert) DeepCopyInto(out *ServiceServingCert) { + *out = *in + if in.Signer != nil { + in, out := &in.Signer, &out.Signer + *out = new(configv1.CertInfo) + **out = **in + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ServiceServingCert. +func (in *ServiceServingCert) DeepCopy() *ServiceServingCert { + if in == nil { + return nil + } + out := new(ServiceServingCert) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SourceStrategyDefaultsConfig) DeepCopyInto(out *SourceStrategyDefaultsConfig) { + *out = *in + if in.Incremental != nil { + in, out := &in.Incremental, &out.Incremental + *out = new(bool) + **out = **in + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SourceStrategyDefaultsConfig. +func (in *SourceStrategyDefaultsConfig) DeepCopy() *SourceStrategyDefaultsConfig { + if in == nil { + return nil + } + out := new(SourceStrategyDefaultsConfig) + in.DeepCopyInto(out) + return out +} diff --git a/vendor/github.com/openshift/api/openshiftcontrolplane/v1/zz_generated.swagger_doc_generated.go b/vendor/github.com/openshift/api/openshiftcontrolplane/v1/zz_generated.swagger_doc_generated.go new file mode 100644 index 0000000000000000000000000000000000000000..9547ae2bea91af3a5373105ee507e2a99a5259a0 --- /dev/null +++ b/vendor/github.com/openshift/api/openshiftcontrolplane/v1/zz_generated.swagger_doc_generated.go @@ -0,0 +1,235 @@ +package v1 + +// This file contains a collection of methods that can be used from go-restful to +// generate Swagger API documentation for its models. Please read this PR for more +// information on the implementation: https://github.com/emicklei/go-restful/pull/215 +// +// TODOs are ignored from the parser (e.g. TODO(andronat):... || TODO:...) if and only if +// they are on one line! For multiple line or blocks that you want to ignore use ---. 
+// Any context after a --- is ignored. +// +// Those methods can be generated by using hack/update-swagger-docs.sh + +// AUTO-GENERATED FUNCTIONS START HERE +var map_BuildControllerConfig = map[string]string{ + "additionalTrustedCA": "additionalTrustedCA is a path to a pem bundle file containing additional CAs that should be trusted for image pushes and pulls during builds.", +} + +func (BuildControllerConfig) SwaggerDoc() map[string]string { + return map_BuildControllerConfig +} + +var map_BuildDefaultsConfig = map[string]string{ + "": "BuildDefaultsConfig controls the default information for Builds", + "gitHTTPProxy": "gitHTTPProxy is the location of the HTTPProxy for Git source", + "gitHTTPSProxy": "gitHTTPSProxy is the location of the HTTPSProxy for Git source", + "gitNoProxy": "gitNoProxy is the list of domains for which the proxy should not be used", + "env": "env is a set of default environment variables that will be applied to the build if the specified variables do not exist on the build", + "sourceStrategyDefaults": "sourceStrategyDefaults are default values that apply to builds using the source strategy.", + "imageLabels": "imageLabels is a list of labels that are applied to the resulting image. User can override a default label by providing a label with the same name in their Build/BuildConfig.", + "nodeSelector": "nodeSelector is a selector which must be true for the build pod to fit on a node", + "annotations": "annotations are annotations that will be added to the build pod", + "resources": "resources defines resource requirements to execute the build.", +} + +func (BuildDefaultsConfig) SwaggerDoc() map[string]string { + return map_BuildDefaultsConfig +} + +var map_BuildOverridesConfig = map[string]string{ + "": "BuildOverridesConfig controls override settings for builds", + "forcePull": "forcePull indicates whether the build strategy should always be set to ForcePull=true", + "imageLabels": "imageLabels is a list of labels that are applied to the resulting image. If user provided a label in their Build/BuildConfig with the same name as one in this list, the user's label will be overwritten.", + "nodeSelector": "nodeSelector is a selector which must be true for the build pod to fit on a node", + "annotations": "annotations are annotations that will be added to the build pod", + "tolerations": "tolerations is a list of Tolerations that will override any existing tolerations set on a build pod.", +} + +func (BuildOverridesConfig) SwaggerDoc() map[string]string { + return map_BuildOverridesConfig +} + +var map_ClusterNetworkEntry = map[string]string{ + "": "ClusterNetworkEntry defines an individual cluster network. The CIDRs cannot overlap with other cluster network CIDRs, CIDRs reserved for external ips, CIDRs reserved for service networks, and CIDRs reserved for ingress ips.", + "cidr": "CIDR defines the total range of a cluster networks address space.", + "hostSubnetLength": "HostSubnetLength is the number of bits of the accompanying CIDR address to allocate to each node. eg, 8 would mean that each node would have a /24 slice of the overlay network for its pod.", +} + +func (ClusterNetworkEntry) SwaggerDoc() map[string]string { + return map_ClusterNetworkEntry +} + +var map_DockerPullSecretControllerConfig = map[string]string{ + "registryURLs": "registryURLs is a list of urls that the docker pull secrets should be valid for.", + "internalRegistryHostname": "internalRegistryHostname is the hostname for the default internal image registry. 
The value must be in \"hostname[:port]\" format. Docker pull secrets will be generated for this registry.", +} + +func (DockerPullSecretControllerConfig) SwaggerDoc() map[string]string { + return map_DockerPullSecretControllerConfig +} + +var map_FrontProxyConfig = map[string]string{ + "clientCA": "clientCA is a path to the CA bundle to use to verify the common name of the front proxy's client cert", + "allowedNames": "allowedNames is an optional list of common names to require a match from.", + "usernameHeaders": "usernameHeaders is the set of headers to check for the username", + "groupHeaders": "groupHeaders is the set of headers to check for groups", + "extraHeaderPrefixes": "extraHeaderPrefixes is the set of header prefixes to check for user extra", +} + +func (FrontProxyConfig) SwaggerDoc() map[string]string { + return map_FrontProxyConfig +} + +var map_ImageConfig = map[string]string{ + "": "ImageConfig holds the necessary configuration options for building image names for system components", + "format": "Format is the format of the name to be built for the system component", + "latest": "Latest determines if the latest tag will be pulled from the registry", +} + +func (ImageConfig) SwaggerDoc() map[string]string { + return map_ImageConfig +} + +var map_ImageImportControllerConfig = map[string]string{ + "maxScheduledImageImportsPerMinute": "maxScheduledImageImportsPerMinute is the maximum number of image streams that will be imported in the background per minute. The default value is 60. Set to -1 for unlimited.", + "disableScheduledImport": "disableScheduledImport allows scheduled background import of images to be disabled.", + "scheduledImageImportMinimumIntervalSeconds": "scheduledImageImportMinimumIntervalSeconds is the minimum number of seconds that can elapse between when image streams scheduled for background import are checked against the upstream repository. The default value is 15 minutes.", +} + +func (ImageImportControllerConfig) SwaggerDoc() map[string]string { + return map_ImageImportControllerConfig +} + +var map_ImagePolicyConfig = map[string]string{ + "maxImagesBulkImportedPerRepository": "maxImagesBulkImportedPerRepository controls the number of images that are imported when a user does a bulk import of a container repository. This number is set low to prevent users from importing large numbers of images accidentally. Set -1 for no limit.", + "allowedRegistriesForImport": "allowedRegistriesForImport limits the container image registries that normal users may import images from. Set this list to the registries that you trust to contain valid Docker images and that you want applications to be able to import from. Users with permission to create Images or ImageStreamMappings via the API are not affected by this policy - typically only administrators or system integrations will have those permissions.", + "internalRegistryHostname": "internalRegistryHostname sets the hostname for the default internal image registry. The value must be in \"hostname[:port]\" format. For backward compatibility, users can still use OPENSHIFT_DEFAULT_REGISTRY environment variable but this setting overrides the environment variable.", + "externalRegistryHostnames": "externalRegistryHostnames provides the hostnames for the default external image registry. The external hostname should be set only when the image registry is exposed externally. The first value is used in 'publicDockerImageRepository' field in ImageStreams. 
The value must be in \"hostname[:port]\" format.", + "additionalTrustedCA": "additionalTrustedCA is a path to a pem bundle file containing additional CAs that should be trusted during imagestream import.", +} + +func (ImagePolicyConfig) SwaggerDoc() map[string]string { + return map_ImagePolicyConfig +} + +var map_IngressControllerConfig = map[string]string{ + "ingressIPNetworkCIDR": "ingressIPNetworkCIDR controls the range to assign ingress ips from for services of type LoadBalancer on bare metal. If empty, ingress ips will not be assigned. It may contain a single CIDR that will be allocated from. For security reasons, you should ensure that this range does not overlap with the CIDRs reserved for external ips, nodes, pods, or services.", +} + +func (IngressControllerConfig) SwaggerDoc() map[string]string { + return map_IngressControllerConfig +} + +var map_JenkinsPipelineConfig = map[string]string{ + "": "JenkinsPipelineConfig holds configuration for the Jenkins pipeline strategy", + "autoProvisionEnabled": "autoProvisionEnabled determines whether a Jenkins server will be spawned from the provided template when the first build config in the project with type JenkinsPipeline is created. When not specified this option defaults to true.", + "templateNamespace": "templateNamespace contains the namespace name where the Jenkins template is stored", + "templateName": "templateName is the name of the default Jenkins template", + "serviceName": "serviceName is the name of the Jenkins service OpenShift uses to detect whether a Jenkins pipeline handler has already been installed in a project. This value *must* match a service name in the provided template.", + "parameters": "parameters specifies a set of optional parameters to the Jenkins template.", +} + +func (JenkinsPipelineConfig) SwaggerDoc() map[string]string { + return map_JenkinsPipelineConfig +} + +var map_NetworkControllerConfig = map[string]string{ + "": "MasterNetworkConfig to be passed to the compiled in network plugin", + "clusterNetworks": "clusterNetworks contains a list of cluster networks that defines the global overlay networks L3 space.", +} + +func (NetworkControllerConfig) SwaggerDoc() map[string]string { + return map_NetworkControllerConfig +} + +var map_OpenShiftAPIServerConfig = map[string]string{ + "aggregatorConfig": "aggregatorConfig contains information about how to verify the aggregator front proxy", + "imagePolicyConfig": "imagePolicyConfig feeds the image policy admission plugin", + "projectConfig": "projectConfig feeds an admission plugin", + "routingConfig": "routingConfig holds information about routing and route generation", + "serviceAccountOAuthGrantMethod": "serviceAccountOAuthGrantMethod is used for determining client authorization for service account oauth client. It must be either: deny, prompt, or \"\"", + "jenkinsPipelineConfig": "jenkinsPipelineConfig holds information about the default Jenkins template used for JenkinsPipeline build strategy.", + "cloudProviderFile": "cloudProviderFile points to the cloud config file", +} + +func (OpenShiftAPIServerConfig) SwaggerDoc() map[string]string { + return map_OpenShiftAPIServerConfig +} + +var map_OpenShiftControllerManagerConfig = map[string]string{ + "servingInfo": "servingInfo describes how to start serving", + "leaderElection": "leaderElection defines the configuration for electing a controller instance to make changes to the cluster. 
If unspecified, the ControllerTTL value is checked to determine whether the legacy direct etcd election code will be used.", + "controllers": "controllers is a list of controllers to enable. '*' enables all on-by-default controllers, 'foo' enables the controller named 'foo', '-foo' disables the controller named 'foo'. Defaults to \"*\".", +} + +func (OpenShiftControllerManagerConfig) SwaggerDoc() map[string]string { + return map_OpenShiftControllerManagerConfig +} + +var map_ProjectConfig = map[string]string{ + "defaultNodeSelector": "defaultNodeSelector holds the default project node label selector", + "projectRequestMessage": "projectRequestMessage is the string presented to a user if they are unable to request a project via the projectrequest api endpoint", + "projectRequestTemplate": "projectRequestTemplate is the template to use for creating projects in response to projectrequest. It is in the format namespace/template and it is optional. If it is not specified, a default template is used.", +} + +func (ProjectConfig) SwaggerDoc() map[string]string { + return map_ProjectConfig +} + +var map_RegistryLocation = map[string]string{ + "": "RegistryLocation contains a location of the registry specified by the registry domain name. The domain name might include wildcards, like '*' or '??'.", + "domainName": "DomainName specifies a domain name for the registry. In case the registry uses a non-standard (80 or 443) port, the port should be included in the domain name as well.", + "insecure": "Insecure indicates whether the registry is secure (https) or insecure (http). By default (if not specified) the registry is assumed to be secure.", +} + +func (RegistryLocation) SwaggerDoc() map[string]string { + return map_RegistryLocation +} + +var map_RoutingConfig = map[string]string{ + "": "RoutingConfig holds the necessary configuration options for routing to subdomains", + "subdomain": "subdomain is the suffix appended to $service.$namespace. to form the default route hostname. DEPRECATED: This field is being replaced by routers setting their own defaults. This is the \"default\" route.", +} + +func (RoutingConfig) SwaggerDoc() map[string]string { + return map_RoutingConfig +} + +var map_SecurityAllocator = map[string]string{ + "": "SecurityAllocator controls the automatic allocation of UIDs and MCS labels to a project. If nil, allocation is disabled.", + "uidAllocatorRange": "UIDAllocatorRange defines the total set of Unix user IDs (UIDs) that will be allocated to projects automatically, and the size of the block each namespace gets. For example, 1000-1999/10 will allocate ten UIDs per namespace, and will be able to allocate up to 100 blocks before running out of space. The default is to allocate from 1 billion to 2 billion in 10k blocks (which is the expected size of the ranges container images will use once user namespaces are started).", + "mcsAllocatorRange": "MCSAllocatorRange defines the range of MCS categories that will be assigned to namespaces. The format is \"<prefix>/<numberOfLabels>[,<maxCategory>]\". The default is \"s0/2\" and will allocate from c0 -> c1023, which means a total of 535k labels are available (1024 choose 2 ~ 535k). If this value is changed after startup, new projects may receive labels that are already allocated to other projects.
Prefix may be any valid SELinux set of terms (including user, role, and type), although leaving them as the default will allow the server to set them automatically.\n\nExamples: * s0:/2 - Allocate labels from s0:c0,c0 to s0:c511,c511 * s0:/2,512 - Allocate labels from s0:c0,c0,c0 to s0:c511,c511,511", + "mcsLabelsPerProject": "MCSLabelsPerProject defines the number of labels that should be reserved per project. The default is 5 to match the default UID and MCS ranges (100k namespaces, 535k/5 labels).", +} + +func (SecurityAllocator) SwaggerDoc() map[string]string { + return map_SecurityAllocator +} + +var map_ServiceAccountControllerConfig = map[string]string{ + "managedNames": "managedNames is a list of service account names that will be auto-created in every namespace. If no names are specified, the ServiceAccountsController will not be started.", +} + +func (ServiceAccountControllerConfig) SwaggerDoc() map[string]string { + return map_ServiceAccountControllerConfig +} + +var map_ServiceServingCert = map[string]string{ + "": "ServiceServingCert holds configuration for service serving cert signer which creates cert/key pairs for pods fulfilling a service to serve with.", + "signer": "Signer holds the signing information used to automatically sign serving certificates. If this value is nil, then certs are not signed automatically.", +} + +func (ServiceServingCert) SwaggerDoc() map[string]string { + return map_ServiceServingCert +} + +var map_SourceStrategyDefaultsConfig = map[string]string{ + "": "SourceStrategyDefaultsConfig contains values that apply to builds using the source strategy.", + "incremental": "incremental indicates if s2i build strategies should perform an incremental build or not", +} + +func (SourceStrategyDefaultsConfig) SwaggerDoc() map[string]string { + return map_SourceStrategyDefaultsConfig +} + +// AUTO-GENERATED FUNCTIONS END HERE diff --git a/vendor/github.com/openshift/api/operator/install.go b/vendor/github.com/openshift/api/operator/install.go new file mode 100644 index 0000000000000000000000000000000000000000..9cbf25a4bbab97d6c27c2855516d7c8d425391dd --- /dev/null +++ b/vendor/github.com/openshift/api/operator/install.go @@ -0,0 +1,27 @@ +package operator + +import ( + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" + + operatorv1 "github.com/openshift/api/operator/v1" + operatorv1alpha1 "github.com/openshift/api/operator/v1alpha1" +) + +const ( + GroupName = "operator.openshift.io" +) + +var ( + schemeBuilder = runtime.NewSchemeBuilder(operatorv1alpha1.Install, operatorv1.Install) + // Install is a function which adds every version of this group to a scheme + Install = schemeBuilder.AddToScheme +) + +func Resource(resource string) schema.GroupResource { + return schema.GroupResource{Group: GroupName, Resource: resource} +} + +func Kind(kind string) schema.GroupKind { + return schema.GroupKind{Group: GroupName, Kind: kind} +} diff --git a/vendor/github.com/openshift/api/operator/v1/0000_20_kube-apiserver-operator_01_config.crd.yaml b/vendor/github.com/openshift/api/operator/v1/0000_20_kube-apiserver-operator_01_config.crd.yaml new file mode 100644 index 0000000000000000000000000000000000000000..83d5a5a39df5623f644e5c92298eb07c7698a4b3 --- /dev/null +++ b/vendor/github.com/openshift/api/operator/v1/0000_20_kube-apiserver-operator_01_config.crd.yaml @@ -0,0 +1,200 @@ +apiVersion: apiextensions.k8s.io/v1beta1 +kind: CustomResourceDefinition +metadata: + name: kubeapiservers.operator.openshift.io +spec: + group: operator.openshift.io + 
names: + kind: KubeAPIServer + plural: kubeapiservers + singular: kubeapiserver + preserveUnknownFields: false + scope: Cluster + subresources: + status: {} + validation: + openAPIV3Schema: + description: KubeAPIServer provides information to configure an operator to + manage kube-apiserver. + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation + of an object. Servers should convert recognized schemas to the latest + internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource this + object represents. Servers may infer this from the endpoint the client + submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + metadata: + type: object + spec: + description: spec is the specification of the desired behavior of the Kubernetes + API Server + properties: + failedRevisionLimit: + description: failedRevisionLimit is the number of failed static pod + installer revisions to keep on disk and in the api -1 = unlimited, + 0 or unset = 5 (default) + format: int32 + type: integer + forceRedeploymentReason: + description: forceRedeploymentReason can be used to force the redeployment + of the operand by providing a unique string. This provides a mechanism + to kick a previously failed deployment and provide a reason why you + think it will work this time instead of failing again on the same + config. + type: string + logLevel: + description: logLevel is an intent based logging for an overall component. It + does not give fine grained control, but it is a simple way to manage + coarse grained logging choices that operators have to interpret for + their operands. + type: string + managementState: + description: managementState indicates whether and how the operator + should manage the component + pattern: ^(Managed|Force)$ + type: string + observedConfig: + description: observedConfig holds a sparse config that controller has + observed from the cluster state. It exists in spec because it is + an input to the level for the operator + nullable: true + type: object + x-kubernetes-preserve-unknown-fields: true + operatorLogLevel: + description: operatorLogLevel is an intent based logging for the operator + itself. It does not give fine grained control, but it is a simple + way to manage coarse grained logging choices that operators have to + interpret for themselves. + type: string + succeededRevisionLimit: + description: succeededRevisionLimit is the number of successful static + pod installer revisions to keep on disk and in the api -1 = unlimited, + 0 or unset = 5 (default) + format: int32 + type: integer + unsupportedConfigOverrides: + description: 'unsupportedConfigOverrides holds a sparse config that + will override any previously set options. It only needs to be the + fields to override it will end up overlaying in the following order: + 1. hardcoded defaults 2. observedConfig 3. 
unsupportedConfigOverrides' + nullable: true + type: object + x-kubernetes-preserve-unknown-fields: true + type: object + status: + description: status is the most recently observed status of the Kubernetes + API Server + properties: + conditions: + description: conditions is a list of conditions and their status + items: + description: OperatorCondition is just the standard condition fields. + properties: + lastTransitionTime: + format: date-time + type: string + message: + type: string + reason: + type: string + status: + type: string + type: + type: string + type: object + type: array + generations: + description: generations are used to determine when an item needs to + be reconciled or has changed in a way that needs a reaction. + items: + description: GenerationStatus keeps track of the generation for a + given resource so that decisions about forced updates can be made. + properties: + group: + description: group is the group of the thing you're tracking + type: string + hash: + description: hash is an optional field set for resources without + generation that are content sensitive like secrets and configmaps + type: string + lastGeneration: + description: lastGeneration is the last generation of the workload + controller involved + format: int64 + type: integer + name: + description: name is the name of the thing you're tracking + type: string + namespace: + description: namespace is where the thing you're tracking is + type: string + resource: + description: resource is the resource type of the thing you're + tracking + type: string + type: object + type: array + latestAvailableRevision: + description: latestAvailableRevision is the deploymentID of the most + recent deployment + format: int32 + type: integer + latestAvailableRevisionReason: + description: latestAvailableRevisionReason describes the detailed reason + for the most recent deployment + type: string + nodeStatuses: + description: nodeStatuses track the deployment values and errors across + individual nodes + items: + description: NodeStatus provides information about the current state + of a particular node managed by this operator. + properties: + currentRevision: + description: currentRevision is the generation of the most recently + successful deployment + format: int32 + type: integer + lastFailedRevision: + description: lastFailedRevision is the generation of the deployment + we tried and failed to deploy.
+ format: int32 + type: integer + lastFailedRevisionErrors: + description: lastFailedRevisionErrors is a list of the errors + during the failed deployment referenced in lastFailedRevision + items: + type: string + type: array + nodeName: + description: nodeName is the name of the node + type: string + targetRevision: + description: targetRevision is the generation of the deployment + we're trying to apply + format: int32 + type: integer + type: object + type: array + observedGeneration: + description: observedGeneration is the last generation change you've + dealt with + format: int64 + type: integer + readyReplicas: + description: readyReplicas indicates how many replicas are ready and + at the desired state + format: int32 + type: integer + version: + description: version is the level this availability applies to + type: string + type: object + required: + - spec + type: object + version: v1 diff --git a/vendor/github.com/openshift/api/operator/v1/0000_20_kube-apiserver-operator_01_config.crd.yaml-merge-patch b/vendor/github.com/openshift/api/operator/v1/0000_20_kube-apiserver-operator_01_config.crd.yaml-merge-patch new file mode 100644 index 0000000000000000000000000000000000000000..be6a3cc2e7fb0b6af25e1c77f2bfa76cf27990c3 --- /dev/null +++ b/vendor/github.com/openshift/api/operator/v1/0000_20_kube-apiserver-operator_01_config.crd.yaml-merge-patch @@ -0,0 +1,8 @@ +spec: + validation: + openAPIV3Schema: + properties: + spec: + properties: + managementState: + pattern: "^(Managed|Force)$" \ No newline at end of file diff --git a/vendor/github.com/openshift/api/operator/v1/0000_25_kube-controller-manager-operator_01_config.crd.yaml b/vendor/github.com/openshift/api/operator/v1/0000_25_kube-controller-manager-operator_01_config.crd.yaml new file mode 100644 index 0000000000000000000000000000000000000000..257620128ca7367040fc6704f66177de40916736 --- /dev/null +++ b/vendor/github.com/openshift/api/operator/v1/0000_25_kube-controller-manager-operator_01_config.crd.yaml @@ -0,0 +1,202 @@ +apiVersion: apiextensions.k8s.io/v1beta1 +kind: CustomResourceDefinition +metadata: + name: kubecontrollermanagers.operator.openshift.io +spec: + group: operator.openshift.io + names: + categories: + - coreoperators + kind: KubeControllerManager + plural: kubecontrollermanagers + singular: kubecontrollermanager + preserveUnknownFields: false + scope: Cluster + subresources: + status: {} + validation: + openAPIV3Schema: + description: KubeControllerManager provides information to configure an operator + to manage kube-controller-manager. + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation + of an object. Servers should convert recognized schemas to the latest + internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource this + object represents. Servers may infer this from the endpoint the client + submits requests to. Cannot be updated. In CamelCase. 
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + metadata: + type: object + spec: + description: spec is the specification of the desired behavior of the Kubernetes + Controller Manager + properties: + failedRevisionLimit: + description: failedRevisionLimit is the number of failed static pod + installer revisions to keep on disk and in the api -1 = unlimited, + 0 or unset = 5 (default) + format: int32 + type: integer + forceRedeploymentReason: + description: forceRedeploymentReason can be used to force the redeployment + of the operand by providing a unique string. This provides a mechanism + to kick a previously failed deployment and provide a reason why you + think it will work this time instead of failing again on the same + config. + type: string + logLevel: + description: logLevel is an intent based logging for an overall component. It + does not give fine grained control, but it is a simple way to manage + coarse grained logging choices that operators have to interpret for + their operands. + type: string + managementState: + description: managementState indicates whether and how the operator + should manage the component + pattern: ^(Managed|Force)$ + type: string + observedConfig: + description: observedConfig holds a sparse config that controller has + observed from the cluster state. It exists in spec because it is + an input to the level for the operator + nullable: true + type: object + x-kubernetes-preserve-unknown-fields: true + operatorLogLevel: + description: operatorLogLevel is an intent based logging for the operator + itself. It does not give fine grained control, but it is a simple + way to manage coarse grained logging choices that operators have to + interpret for themselves. + type: string + succeededRevisionLimit: + description: succeededRevisionLimit is the number of successful static + pod installer revisions to keep on disk and in the api -1 = unlimited, + 0 or unset = 5 (default) + format: int32 + type: integer + unsupportedConfigOverrides: + description: 'unsupportedConfigOverrides holds a sparse config that + will override any previously set options. It only needs to be the + fields to override it will end up overlaying in the following order: + 1. hardcoded defaults 2. observedConfig 3. unsupportedConfigOverrides' + nullable: true + type: object + x-kubernetes-preserve-unknown-fields: true + type: object + status: + description: status is the most recently observed status of the Kubernetes + Controller Manager + properties: + conditions: + description: conditions is a list of conditions and their status + items: + description: OperatorCondition is just the standard condition fields. + properties: + lastTransitionTime: + format: date-time + type: string + message: + type: string + reason: + type: string + status: + type: string + type: + type: string + type: object + type: array + generations: + description: generations are used to determine when an item needs to + be reconciled or has changed in a way that needs a reaction. + items: + description: GenerationStatus keeps track of the generation for a + given resource so that decisions about forced updates can be made. 
+ properties: + group: + description: group is the group of the thing you're tracking + type: string + hash: + description: hash is an optional field set for resources without + generation that are content sensitive like secrets and configmaps + type: string + lastGeneration: + description: lastGeneration is the last generation of the workload + controller involved + format: int64 + type: integer + name: + description: name is the name of the thing you're tracking + type: string + namespace: + description: namespace is where the thing you're tracking is + type: string + resource: + description: resource is the resource type of the thing you're + tracking + type: string + type: object + type: array + latestAvailableRevision: + description: latestAvailableRevision is the deploymentID of the most + recent deployment + format: int32 + type: integer + latestAvailableRevisionReason: + description: latestAvailableRevisionReason describes the detailed reason + for the most recent deployment + type: string + nodeStatuses: + description: nodeStatuses track the deployment values and errors across + individual nodes + items: + description: NodeStatus provides information about the current state + of a particular node managed by this operator. + properties: + currentRevision: + description: currentRevision is the generation of the most recently + successful deployment + format: int32 + type: integer + lastFailedRevision: + description: lastFailedRevision is the generation of the deployment + we tried and failed to deploy. + format: int32 + type: integer + lastFailedRevisionErrors: + description: lastFailedRevisionErrors is a list of the errors + during the failed deployment referenced in lastFailedRevision + items: + type: string + type: array + nodeName: + description: nodeName is the name of the node + type: string + targetRevision: + description: targetRevision is the generation of the deployment + we're trying to apply + format: int32 + type: integer + type: object + type: array + observedGeneration: + description: observedGeneration is the last generation change you've + dealt with + format: int64 + type: integer + readyReplicas: + description: readyReplicas indicates how many replicas are ready and + at the desired state + format: int32 + type: integer + version: + description: version is the level this availability applies to + type: string + type: object + required: + - spec + type: object + version: v1 diff --git a/vendor/github.com/openshift/api/operator/v1/0000_25_kube-controller-manager-operator_01_config.crd.yaml-merge-patch b/vendor/github.com/openshift/api/operator/v1/0000_25_kube-controller-manager-operator_01_config.crd.yaml-merge-patch new file mode 100644 index 0000000000000000000000000000000000000000..5cc312a439fd714d97e862d6e28010ac1131440b --- /dev/null +++ b/vendor/github.com/openshift/api/operator/v1/0000_25_kube-controller-manager-operator_01_config.crd.yaml-merge-patch @@ -0,0 +1,10 @@ +# this file can be removed once we switch to v0.2 of crd generator +# see: https://github.com/openshift/cluster-kube-scheduler-operator/pull/148 +spec: + validation: + openAPIV3Schema: + properties: + spec: + properties: + managementState: + pattern: "^(Managed|Force)$" diff --git a/vendor/github.com/openshift/api/operator/v1/0000_25_kube-scheduler-operator_01_config.crd.yaml b/vendor/github.com/openshift/api/operator/v1/0000_25_kube-scheduler-operator_01_config.crd.yaml new file mode 100644 index 0000000000000000000000000000000000000000..7d7ac277d500cf1744c970143aba94dad6774669 --- /dev/null
+++ b/vendor/github.com/openshift/api/operator/v1/0000_25_kube-scheduler-operator_01_config.crd.yaml @@ -0,0 +1,202 @@ +apiVersion: apiextensions.k8s.io/v1beta1 +kind: CustomResourceDefinition +metadata: + name: kubeschedulers.operator.openshift.io +spec: + group: operator.openshift.io + names: + categories: + - coreoperators + kind: KubeScheduler + plural: kubeschedulers + singular: kubescheduler + preserveUnknownFields: false + scope: Cluster + subresources: + status: {} + validation: + openAPIV3Schema: + description: KubeScheduler provides information to configure an operator to + manage scheduler. + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation + of an object. Servers should convert recognized schemas to the latest + internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource this + object represents. Servers may infer this from the endpoint the client + submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + metadata: + type: object + spec: + description: spec is the specification of the desired behavior of the Kubernetes + Scheduler + properties: + failedRevisionLimit: + description: failedRevisionLimit is the number of failed static pod + installer revisions to keep on disk and in the api -1 = unlimited, + 0 or unset = 5 (default) + format: int32 + type: integer + forceRedeploymentReason: + description: forceRedeploymentReason can be used to force the redeployment + of the operand by providing a unique string. This provides a mechanism + to kick a previously failed deployment and provide a reason why you + think it will work this time instead of failing again on the same + config. + type: string + logLevel: + description: logLevel is an intent based logging for an overall component. It + does not give fine grained control, but it is a simple way to manage + coarse grained logging choices that operators have to interpret for + their operands. + type: string + managementState: + description: managementState indicates whether and how the operator + should manage the component + pattern: ^(Managed|Force)$ + type: string + observedConfig: + description: observedConfig holds a sparse config that controller has + observed from the cluster state. It exists in spec because it is + an input to the level for the operator + nullable: true + type: object + x-kubernetes-preserve-unknown-fields: true + operatorLogLevel: + description: operatorLogLevel is an intent based logging for the operator + itself. It does not give fine grained control, but it is a simple + way to manage coarse grained logging choices that operators have to + interpret for themselves. + type: string + succeededRevisionLimit: + description: succeededRevisionLimit is the number of successful static + pod installer revisions to keep on disk and in the api -1 = unlimited, + 0 or unset = 5 (default) + format: int32 + type: integer + unsupportedConfigOverrides: + description: 'unsupportedConfigOverrides holds a sparse config that + will override any previously set options. It only needs to be the + fields to override it will end up overlaying in the following order: + 1. hardcoded defaults 2. observedConfig 3. 
unsupportedConfigOverrides' + nullable: true + type: object + x-kubernetes-preserve-unknown-fields: true + type: object + status: + description: status is the most recently observed status of the Kubernetes + Scheduler + properties: + conditions: + description: conditions is a list of conditions and their status + items: + description: OperatorCondition is just the standard condition fields. + properties: + lastTransitionTime: + format: date-time + type: string + message: + type: string + reason: + type: string + status: + type: string + type: + type: string + type: object + type: array + generations: + description: generations are used to determine when an item needs to + be reconciled or has changed in a way that needs a reaction. + items: + description: GenerationStatus keeps track of the generation for a + given resource so that decisions about forced updates can be made. + properties: + group: + description: group is the group of the thing you're tracking + type: string + hash: + description: hash is an optional field set for resources without + generation that are content sensitive like secrets and configmaps + type: string + lastGeneration: + description: lastGeneration is the last generation of the workload + controller involved + format: int64 + type: integer + name: + description: name is the name of the thing you're tracking + type: string + namespace: + description: namespace is where the thing you're tracking is + type: string + resource: + description: resource is the resource type of the thing you're + tracking + type: string + type: object + type: array + latestAvailableRevision: + description: latestAvailableRevision is the deploymentID of the most + recent deployment + format: int32 + type: integer + latestAvailableRevisionReason: + description: latestAvailableRevisionReason describes the detailed reason + for the most recent deployment + type: string + nodeStatuses: + description: nodeStatuses track the deployment values and errors across + individual nodes + items: + description: NodeStatus provides information about the current state + of a particular node managed by this operator. + properties: + currentRevision: + description: currentRevision is the generation of the most recently + successful deployment + format: int32 + type: integer + lastFailedRevision: + description: lastFailedRevision is the generation of the deployment + we tried and failed to deploy.
+ format: int32 + type: integer + lastFailedRevisionErrors: + description: lastFailedRevisionErrors is a list of the errors + during the failed deployment referenced in lastFailedRevision + items: + type: string + type: array + nodeName: + description: nodeName is the name of the node + type: string + targetRevision: + description: targetRevision is the generation of the deployment + we're trying to apply + format: int32 + type: integer + type: object + type: array + observedGeneration: + description: observedGeneration is the last generation change you've + dealt with + format: int64 + type: integer + readyReplicas: + description: readyReplicas indicates how many replicas are ready and + at the desired state + format: int32 + type: integer + version: + description: version is the level this availability applies to + type: string + type: object + required: + - spec + type: object + version: v1 diff --git a/vendor/github.com/openshift/api/operator/v1/0000_25_kube-scheduler-operator_01_config.crd.yaml-merge-patch b/vendor/github.com/openshift/api/operator/v1/0000_25_kube-scheduler-operator_01_config.crd.yaml-merge-patch new file mode 100644 index 0000000000000000000000000000000000000000..5cc312a439fd714d97e862d6e28010ac1131440b --- /dev/null +++ b/vendor/github.com/openshift/api/operator/v1/0000_25_kube-scheduler-operator_01_config.crd.yaml-merge-patch @@ -0,0 +1,10 @@ +# this file can be removed once we switch to v0.2 of crd generator +# see: https://github.com/openshift/cluster-kube-scheduler-operator/pull/148 +spec: + validation: + openAPIV3Schema: + properties: + spec: + properties: + managementState: + pattern: "^(Managed|Force)$" diff --git a/vendor/github.com/openshift/api/operator/v1/0000_30_openshift-apiserver-operator_01_config.crd.yaml b/vendor/github.com/openshift/api/operator/v1/0000_30_openshift-apiserver-operator_01_config.crd.yaml new file mode 100644 index 0000000000000000000000000000000000000000..2fdc2ddb36551f0c97a81cb0c28508bb69f7c179 --- /dev/null +++ b/vendor/github.com/openshift/api/operator/v1/0000_30_openshift-apiserver-operator_01_config.crd.yaml @@ -0,0 +1,147 @@ +apiVersion: apiextensions.k8s.io/v1beta1 +kind: CustomResourceDefinition +metadata: + name: openshiftapiservers.operator.openshift.io +spec: + scope: Cluster + preserveUnknownFields: false + group: operator.openshift.io + version: v1 + names: + kind: OpenShiftAPIServer + plural: openshiftapiservers + singular: openshiftapiserver + categories: + - coreoperators + subresources: + status: {} + validation: + openAPIV3Schema: + description: OpenShiftAPIServer provides information to configure an operator + to manage openshift-apiserver. + type: object + required: + - spec + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation + of an object. Servers should convert recognized schemas to the latest + internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource this + object represents. Servers may infer this from the endpoint the client + submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + metadata: + type: object + spec: + description: spec is the specification of the desired behavior of the OpenShift + API Server. 
+ type: object + properties: + logLevel: + description: logLevel is an intent based logging for an overall component. It + does not give fine grained control, but it is a simple way to manage + coarse grained logging choices that operators have to interpret for + their operands. + type: string + managementState: + description: managementState indicates whether and how the operator + should manage the component + type: string + pattern: ^(Managed|Unmanaged|Force|Removed)$ + observedConfig: + description: observedConfig holds a sparse config that controller has + observed from the cluster state. It exists in spec because it is + an input to the level for the operator + type: object + nullable: true + x-kubernetes-preserve-unknown-fields: true + operatorLogLevel: + description: operatorLogLevel is an intent based logging for the operator + itself. It does not give fine grained control, but it is a simple + way to manage coarse grained logging choices that operators have to + interpret for themselves. + type: string + unsupportedConfigOverrides: + description: 'unsupportedConfigOverrides holds a sparse config that + will override any previously set options. It only needs to be the + fields to override it will end up overlaying in the following order: + 1. hardcoded defaults 2. observedConfig 3. unsupportedConfigOverrides' + type: object + nullable: true + x-kubernetes-preserve-unknown-fields: true + status: + description: status defines the observed status of the OpenShift API Server. + type: object + properties: + conditions: + description: conditions is a list of conditions and their status + type: array + items: + description: OperatorCondition is just the standard condition fields. + type: object + properties: + lastTransitionTime: + type: string + format: date-time + message: + type: string + reason: + type: string + status: + type: string + type: + type: string + generations: + description: generations are used to determine when an item needs to + be reconciled or has changed in a way that needs a reaction. + type: array + items: + description: GenerationStatus keeps track of the generation for a + given resource so that decisions about forced updates can be made. + type: object + properties: + group: + description: group is the group of the thing you're tracking + type: string + hash: + description: hash is an optional field set for resources without + generation that are content sensitive like secrets and configmaps + type: string + lastGeneration: + description: lastGeneration is the last generation of the workload + controller involved + type: integer + format: int64 + name: + description: name is the name of the thing you're tracking + type: string + namespace: + description: namespace is where the thing you're tracking is + type: string + resource: + description: resource is the resource type of the thing you're + tracking + type: string + latestAvailableRevision: + description: latestAvailableRevision is the latest revision used as + suffix of revisioned secrets like encryption-config. A new revision + causes a new deployment of pods. 
+ type: integer + format: int32 + minimum: 0 + observedGeneration: + description: observedGeneration is the last generation change you've + dealt with + type: integer + format: int64 + readyReplicas: + description: readyReplicas indicates how many replicas are ready and + at the desired state + type: integer + format: int32 + version: + description: version is the level this availability applies to + type: string diff --git a/vendor/github.com/openshift/api/operator/v1/0000_40_kube-storage-version-migrator-operator_00_config.crd.yaml b/vendor/github.com/openshift/api/operator/v1/0000_40_kube-storage-version-migrator-operator_00_config.crd.yaml new file mode 100644 index 0000000000000000000000000000000000000000..20ce796cf3b5536f029f02ddc7763c74c14c518c --- /dev/null +++ b/vendor/github.com/openshift/api/operator/v1/0000_40_kube-storage-version-migrator-operator_00_config.crd.yaml @@ -0,0 +1,147 @@ +apiVersion: apiextensions.k8s.io/v1beta1 +kind: CustomResourceDefinition +metadata: + creationTimestamp: null + name: kubestorageversionmigrators.operator.openshift.io +spec: + group: operator.openshift.io + names: + kind: KubeStorageVersionMigrator + listKind: KubeStorageVersionMigratorList + plural: kubestorageversionmigrators + singular: kubestorageversionmigrator + scope: Cluster + preserveUnknownFields: false + subresources: + status: {} + version: v1 + versions: + - name: v1 + served: true + storage: true + "validation": + "openAPIV3Schema": + description: KubeStorageVersionMigrator provides information to configure an + operator to manage kube-storage-version-migrator. + type: object + required: + - spec + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation + of an object. Servers should convert recognized schemas to the latest + internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource this + object represents. Servers may infer this from the endpoint the client + submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + metadata: + type: object + spec: + type: object + properties: + logLevel: + description: logLevel is an intent based logging for an overall component. It + does not give fine grained control, but it is a simple way to manage + coarse grained logging choices that operators have to interpret for + their operands. + type: string + managementState: + description: managementState indicates whether and how the operator + should manage the component + type: string + pattern: ^(Managed|Unmanaged|Force|Removed)$ + observedConfig: + description: observedConfig holds a sparse config that controller has + observed from the cluster state. It exists in spec because it is + an input to the level for the operator + type: object + nullable: true + x-kubernetes-preserve-unknown-fields: true + operatorLogLevel: + description: operatorLogLevel is an intent based logging for the operator + itself. It does not give fine grained control, but it is a simple + way to manage coarse grained logging choices that operators have to + interpret for themselves. + type: string + unsupportedConfigOverrides: + description: 'unsupportedConfigOverrides holds a sparse config that + will override any previously set options. 
It only needs to be the + fields to override it will end up overlaying in the following order: + 1. hardcoded defaults 2. observedConfig 3. unsupportedConfigOverrides' + type: object + nullable: true + x-kubernetes-preserve-unknown-fields: true + status: + type: object + properties: + conditions: + description: conditions is a list of conditions and their status + type: array + items: + description: OperatorCondition is just the standard condition fields. + type: object + properties: + lastTransitionTime: + type: string + format: date-time + message: + type: string + reason: + type: string + status: + type: string + type: + type: string + generations: + description: generations are used to determine when an item needs to + be reconciled or has changed in a way that needs a reaction. + type: array + items: + description: GenerationStatus keeps track of the generation for a + given resource so that decisions about forced updates can be made. + type: object + properties: + group: + description: group is the group of the thing you're tracking + type: string + hash: + description: hash is an optional field set for resources without + generation that are content sensitive like secrets and configmaps + type: string + lastGeneration: + description: lastGeneration is the last generation of the workload + controller involved + type: integer + format: int64 + name: + description: name is the name of the thing you're tracking + type: string + namespace: + description: namespace is where the thing you're tracking is + type: string + resource: + description: resource is the resource type of the thing you're + tracking + type: string + observedGeneration: + description: observedGeneration is the last generation change you've + dealt with + type: integer + format: int64 + readyReplicas: + description: readyReplicas indicates how many replicas are ready and + at the desired state + type: integer + format: int32 + version: + description: version is the level this availability applies to + type: string +status: + acceptedNames: + kind: "" + plural: "" + conditions: [] + storedVersions: [] diff --git a/vendor/github.com/openshift/api/operator/v1/0000_50_cluster-authentication-operator_01_config.crd.yaml b/vendor/github.com/openshift/api/operator/v1/0000_50_cluster-authentication-operator_01_config.crd.yaml new file mode 100644 index 0000000000000000000000000000000000000000..b983e32adcd34be8b962587611c5f687576fb3a0 --- /dev/null +++ b/vendor/github.com/openshift/api/operator/v1/0000_50_cluster-authentication-operator_01_config.crd.yaml @@ -0,0 +1,142 @@ +apiVersion: apiextensions.k8s.io/v1beta1 +kind: CustomResourceDefinition +metadata: + name: authentications.operator.openshift.io +spec: + scope: Cluster + preserveUnknownFields: false + group: operator.openshift.io + version: v1 + names: + kind: Authentication + plural: authentications + singular: authentication + subresources: + status: {} + validation: + openAPIV3Schema: + description: Authentication provides information to configure an operator to + manage authentication. + type: object + required: + - spec + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation + of an object. Servers should convert recognized schemas to the latest + internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource this + object represents. 
Servers may infer this from the endpoint the client + submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + metadata: + type: object + spec: + type: object + properties: + logLevel: + description: logLevel is an intent based logging for an overall component. It + does not give fine grained control, but it is a simple way to manage + coarse grained logging choices that operators have to interpret for + their operands. + type: string + managementState: + description: managementState indicates whether and how the operator + should manage the component + type: string + pattern: ^(Managed|Unmanaged|Force|Removed)$ + observedConfig: + description: observedConfig holds a sparse config that controller has + observed from the cluster state. It exists in spec because it is + an input to the level for the operator + type: object + nullable: true + x-kubernetes-preserve-unknown-fields: true + operatorLogLevel: + description: operatorLogLevel is an intent based logging for the operator + itself. It does not give fine grained control, but it is a simple + way to manage coarse grained logging choices that operators have to + interpret for themselves. + type: string + unsupportedConfigOverrides: + description: 'unsupportedConfigOverrides holds a sparse config that + will override any previously set options. It only needs to be the + fields to override it will end up overlaying in the following order: + 1. hardcoded defaults 2. observedConfig 3. unsupportedConfigOverrides' + type: object + nullable: true + x-kubernetes-preserve-unknown-fields: true + status: + type: object + properties: + conditions: + description: conditions is a list of conditions and their status + type: array + items: + description: OperatorCondition is just the standard condition fields. + type: object + properties: + lastTransitionTime: + type: string + format: date-time + message: + type: string + reason: + type: string + status: + type: string + type: + type: string + generations: + description: generations are used to determine when an item needs to + be reconciled or has changed in a way that needs a reaction. + type: array + items: + description: GenerationStatus keeps track of the generation for a + given resource so that decisions about forced updates can be made. + type: object + properties: + group: + description: group is the group of the thing you're tracking + type: string + hash: + description: hash is an optional field set for resources without + generation that are content sensitive like secrets and configmaps + type: string + lastGeneration: + description: lastGeneration is the last generation of the workload + controller involved + type: integer + format: int64 + name: + description: name is the name of the thing you're tracking + type: string + namespace: + description: namespace is where the thing you're tracking is + type: string + resource: + description: resource is the resource type of the thing you're + tracking + type: string + managingOAuthAPIServer: + description: ManagingOAuthAPIServer indicates whether this operator + is managing OAuth related APIs. Setting this field to true will cause + OAS-O to step down. 
Note that this field will be removed in the future + releases, once https://github.com/openshift/enhancements/blob/master/enhancements/authentication/separate-oauth-resources.md + is fully implemented + type: boolean + observedGeneration: + description: observedGeneration is the last generation change you've + dealt with + type: integer + format: int64 + readyReplicas: + description: readyReplicas indicates how many replicas are ready and + at the desired state + type: integer + format: int32 + version: + description: version is the level this availability applies to + type: string diff --git a/vendor/github.com/openshift/api/operator/v1/0000_50_cluster-openshift-controller-manager-operator_02_config.crd.yaml b/vendor/github.com/openshift/api/operator/v1/0000_50_cluster-openshift-controller-manager-operator_02_config.crd.yaml new file mode 100644 index 0000000000000000000000000000000000000000..589797bb7146f33142716f5551cbe73add2ca46a --- /dev/null +++ b/vendor/github.com/openshift/api/operator/v1/0000_50_cluster-openshift-controller-manager-operator_02_config.crd.yaml @@ -0,0 +1,137 @@ +apiVersion: apiextensions.k8s.io/v1beta1 +kind: CustomResourceDefinition +metadata: + name: openshiftcontrollermanagers.operator.openshift.io +spec: + scope: Cluster + preserveUnknownFields: false + group: operator.openshift.io + version: v1 + names: + kind: OpenShiftControllerManager + plural: openshiftcontrollermanagers + singular: openshiftcontrollermanager + categories: + - coreoperators + subresources: + status: {} + validation: + openAPIV3Schema: + description: OpenShiftControllerManager provides information to configure an + operator to manage openshift-controller-manager. + type: object + required: + - spec + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation + of an object. Servers should convert recognized schemas to the latest + internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource this + object represents. Servers may infer this from the endpoint the client + submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + metadata: + type: object + spec: + type: object + properties: + logLevel: + description: logLevel is an intent based logging for an overall component. It + does not give fine grained control, but it is a simple way to manage + coarse grained logging choices that operators have to interpret for + their operands. + type: string + managementState: + description: managementState indicates whether and how the operator + should manage the component + type: string + pattern: ^(Managed|Unmanaged|Force|Removed)$ + observedConfig: + description: observedConfig holds a sparse config that controller has + observed from the cluster state. It exists in spec because it is + an input to the level for the operator + type: object + nullable: true + x-kubernetes-preserve-unknown-fields: true + operatorLogLevel: + description: operatorLogLevel is an intent based logging for the operator + itself. It does not give fine grained control, but it is a simple + way to manage coarse grained logging choices that operators have to + interpret for themselves. 
+ type: string + unsupportedConfigOverrides: + description: 'unsupportedConfigOverrides holds a sparse config that + will override any previously set options. It only needs to be the + fields to override it will end up overlaying in the following order: + 1. hardcoded defaults 2. observedConfig 3. unsupportedConfigOverrides' + type: object + nullable: true + x-kubernetes-preserve-unknown-fields: true + status: + type: object + properties: + conditions: + description: conditions is a list of conditions and their status + type: array + items: + description: OperatorCondition is just the standard condition fields. + type: object + properties: + lastTransitionTime: + type: string + format: date-time + message: + type: string + reason: + type: string + status: + type: string + type: + type: string + generations: + description: generations are used to determine when an item needs to + be reconciled or has changed in a way that needs a reaction. + type: array + items: + description: GenerationStatus keeps track of the generation for a + given resource so that decisions about forced updates can be made. + type: object + properties: + group: + description: group is the group of the thing you're tracking + type: string + hash: + description: hash is an optional field set for resources without + generation that are content sensitive like secrets and configmaps + type: string + lastGeneration: + description: lastGeneration is the last generation of the workload + controller involved + type: integer + format: int64 + name: + description: name is the name of the thing you're tracking + type: string + namespace: + description: namespace is where the thing you're tracking is + type: string + resource: + description: resource is the resource type of the thing you're + tracking + type: string + observedGeneration: + description: observedGeneration is the last generation change you've + dealt with + type: integer + format: int64 + readyReplicas: + description: readyReplicas indicates how many replicas are ready and + at the desired state + type: integer + format: int32 + version: + description: version is the level this availability applies to + type: string diff --git a/vendor/github.com/openshift/api/operator/v1/0000_50_cluster-svcat-apiserver-operator_02_config.crd.yaml b/vendor/github.com/openshift/api/operator/v1/0000_50_cluster-svcat-apiserver-operator_02_config.crd.yaml new file mode 100644 index 0000000000000000000000000000000000000000..98322e984028ecc9084c99dfe1aab1c234d446ec --- /dev/null +++ b/vendor/github.com/openshift/api/operator/v1/0000_50_cluster-svcat-apiserver-operator_02_config.crd.yaml @@ -0,0 +1,137 @@ +apiVersion: apiextensions.k8s.io/v1beta1 +kind: CustomResourceDefinition +metadata: + name: servicecatalogapiservers.operator.openshift.io +spec: + scope: Cluster + preserveUnknownFields: false + group: operator.openshift.io + version: v1 + names: + kind: ServiceCatalogAPIServer + plural: servicecatalogapiservers + singular: servicecatalogapiserver + categories: + - coreoperators + subresources: + status: {} + validation: + openAPIV3Schema: + description: ServiceCatalogAPIServer provides information to configure an operator + to manage Service Catalog API Server + type: object + required: + - spec + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation + of an object. Servers should convert recognized schemas to the latest + internal value, and may reject unrecognized values. 
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource this + object represents. Servers may infer this from the endpoint the client + submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + metadata: + type: object + spec: + type: object + properties: + logLevel: + description: logLevel is an intent based logging for an overall component. It + does not give fine grained control, but it is a simple way to manage + coarse grained logging choices that operators have to interpret for + their operands. + type: string + managementState: + description: managementState indicates whether and how the operator + should manage the component + type: string + pattern: ^(Managed|Unmanaged|Force|Removed)$ + observedConfig: + description: observedConfig holds a sparse config that controller has + observed from the cluster state. It exists in spec because it is + an input to the level for the operator + type: object + nullable: true + x-kubernetes-preserve-unknown-fields: true + operatorLogLevel: + description: operatorLogLevel is an intent based logging for the operator + itself. It does not give fine grained control, but it is a simple + way to manage coarse grained logging choices that operators have to + interpret for themselves. + type: string + unsupportedConfigOverrides: + description: 'unsupportedConfigOverrides holds a sparse config that + will override any previously set options. It only needs to be the + fields to override it will end up overlaying in the following order: + 1. hardcoded defaults 2. observedConfig 3. unsupportedConfigOverrides' + type: object + nullable: true + x-kubernetes-preserve-unknown-fields: true + status: + type: object + properties: + conditions: + description: conditions is a list of conditions and their status + type: array + items: + description: OperatorCondition is just the standard condition fields. + type: object + properties: + lastTransitionTime: + type: string + format: date-time + message: + type: string + reason: + type: string + status: + type: string + type: + type: string + generations: + description: generations are used to determine when an item needs to + be reconciled or has changed in a way that needs a reaction. + type: array + items: + description: GenerationStatus keeps track of the generation for a + given resource so that decisions about forced updates can be made. 
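+ # Illustrative sketch (assumed values, not part of the vendored file): a + # status.generations entry as an operator might record it, e.g. tracking a + # Deployment named 'apiserver'; the fields mirror the GenerationStatus + # schema below: + # generations: + # - group: apps + #   resource: deployments + #   namespace: openshift-service-catalog-apiserver + #   name: apiserver + #   lastGeneration: 3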
+ type: object + properties: + group: + description: group is the group of the thing you're tracking + type: string + hash: + description: hash is an optional field set for resources without + generation that are content sensitive like secrets and configmaps + type: string + lastGeneration: + description: lastGeneration is the last generation of the workload + controller involved + type: integer + format: int64 + name: + description: name is the name of the thing you're tracking + type: string + namespace: + description: namespace is where the thing you're tracking is + type: string + resource: + description: resource is the resource type of the thing you're + tracking + type: string + observedGeneration: + description: observedGeneration is the last generation change you've + dealt with + type: integer + format: int64 + readyReplicas: + description: readyReplicas indicates how many replicas are ready and + at the desired state + type: integer + format: int32 + version: + description: version is the level this availability applies to + type: string diff --git a/vendor/github.com/openshift/api/operator/v1/0000_50_cluster-svcat-controller-manager-operator_02_config.crd.yaml b/vendor/github.com/openshift/api/operator/v1/0000_50_cluster-svcat-controller-manager-operator_02_config.crd.yaml new file mode 100644 index 0000000000000000000000000000000000000000..53453298e329b10f059ef3d9aa32bc73a7c26e67 --- /dev/null +++ b/vendor/github.com/openshift/api/operator/v1/0000_50_cluster-svcat-controller-manager-operator_02_config.crd.yaml @@ -0,0 +1,137 @@ +apiVersion: apiextensions.k8s.io/v1beta1 +kind: CustomResourceDefinition +metadata: + name: servicecatalogcontrollermanagers.operator.openshift.io +spec: + scope: Cluster + preserveUnknownFields: false + group: operator.openshift.io + version: v1 + names: + kind: ServiceCatalogControllerManager + plural: servicecatalogcontrollermanagers + singular: servicecatalogcontrollermanager + categories: + - coreoperators + subresources: + status: {} + validation: + openAPIV3Schema: + description: ServiceCatalogControllerManager provides information to configure + an operator to manage Service Catalog Controller Manager + type: object + required: + - spec + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation + of an object. Servers should convert recognized schemas to the latest + internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource this + object represents. Servers may infer this from the endpoint the client + submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + metadata: + type: object + spec: + type: object + properties: + logLevel: + description: logLevel is an intent based logging for an overall component. It + does not give fine grained control, but it is a simple way to manage + coarse grained logging choices that operators have to interpret for + their operands. + type: string + managementState: + description: managementState indicates whether and how the operator + should manage the component + type: string + pattern: ^(Managed|Unmanaged|Force|Removed)$ + observedConfig: + description: observedConfig holds a sparse config that controller has + observed from the cluster state. 
It exists in spec because it is + an input to the level for the operator + type: object + nullable: true + x-kubernetes-preserve-unknown-fields: true + operatorLogLevel: + description: operatorLogLevel is an intent based logging for the operator + itself. It does not give fine grained control, but it is a simple + way to manage coarse grained logging choices that operators have to + interpret for themselves. + type: string + unsupportedConfigOverrides: + description: 'unsupportedConfigOverrides holds a sparse config that + will override any previously set options. It only needs to be the + fields to override it will end up overlaying in the following order: + 1. hardcoded defaults 2. observedConfig 3. unsupportedConfigOverrides' + type: object + nullable: true + x-kubernetes-preserve-unknown-fields: true + status: + type: object + properties: + conditions: + description: conditions is a list of conditions and their status + type: array + items: + description: OperatorCondition is just the standard condition fields. + type: object + properties: + lastTransitionTime: + type: string + format: date-time + message: + type: string + reason: + type: string + status: + type: string + type: + type: string + generations: + description: generations are used to determine when an item needs to + be reconciled or has changed in a way that needs a reaction. + type: array + items: + description: GenerationStatus keeps track of the generation for a + given resource so that decisions about forced updates can be made. + type: object + properties: + group: + description: group is the group of the thing you're tracking + type: string + hash: + description: hash is an optional field set for resources without + generation that are content sensitive like secrets and configmaps + type: string + lastGeneration: + description: lastGeneration is the last generation of the workload + controller involved + type: integer + format: int64 + name: + description: name is the name of the thing you're tracking + type: string + namespace: + description: namespace is where the thing you're tracking is + type: string + resource: + description: resource is the resource type of the thing you're + tracking + type: string + observedGeneration: + description: observedGeneration is the last generation change you've + dealt with + type: integer + format: int64 + readyReplicas: + description: readyReplicas indicates how many replicas are ready and + at the desired state + type: integer + format: int32 + version: + description: version is the level this availability applies to + type: string diff --git a/vendor/github.com/openshift/api/operator/v1/0000_50_ingress-operator_00-custom-resource-definition.yaml b/vendor/github.com/openshift/api/operator/v1/0000_50_ingress-operator_00-custom-resource-definition.yaml new file mode 100644 index 0000000000000000000000000000000000000000..94206d07d4fcf8ff25f849cffc6dd84ce5b64482 --- /dev/null +++ b/vendor/github.com/openshift/api/operator/v1/0000_50_ingress-operator_00-custom-resource-definition.yaml @@ -0,0 +1,617 @@ +apiVersion: apiextensions.k8s.io/v1beta1 +kind: CustomResourceDefinition +metadata: + creationTimestamp: null + name: ingresscontrollers.operator.openshift.io +spec: + group: operator.openshift.io + names: + kind: IngressController + listKind: IngressControllerList + plural: ingresscontrollers + singular: ingresscontroller + scope: "" + preserveUnknownFields: false + subresources: + scale: + labelSelectorPath: .status.selector + specReplicasPath: .spec.replicas + statusReplicasPath: 
.status.availableReplicas + status: {} + validation: + openAPIV3Schema: + description: "IngressController describes a managed ingress controller for the + cluster. The controller can service OpenShift Route and Kubernetes Ingress + resources. \n When an IngressController is created, a new ingress controller + deployment is created to allow external traffic to reach the services that + expose Ingress or Route resources. Updating this resource may lead to disruption + for public facing network connections as a new ingress controller revision + may be rolled out. \n https://kubernetes.io/docs/concepts/services-networking/ingress-controllers + \n Whenever possible, sensible defaults for the platform are used. See each + field for more details." + type: object + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation + of an object. Servers should convert recognized schemas to the latest + internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource this + object represents. Servers may infer this from the endpoint the client + submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + metadata: + type: object + spec: + description: spec is the specification of the desired behavior of the IngressController. + type: object + properties: + defaultCertificate: + description: "defaultCertificate is a reference to a secret containing + the default certificate served by the ingress controller. When Routes + don't specify their own certificate, defaultCertificate is used. \n + The secret must contain the following keys and data: \n tls.crt: + certificate file contents tls.key: key file contents \n If unset, + a wildcard certificate is automatically generated and used. The certificate + is valid for the ingress controller domain (and subdomains) and the + generated certificate's CA will be automatically integrated with the + cluster's trust store. \n The in-use certificate (whether generated + or user-specified) will be automatically integrated with OpenShift's + built-in OAuth server." + type: object + properties: + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, uid?' + type: string + domain: + description: "domain is a DNS name serviced by the ingress controller + and is used to configure multiple features: \n * For the LoadBalancerService + endpoint publishing strategy, domain is used to configure DNS records. + See endpointPublishingStrategy. \n * When using a generated default + certificate, the certificate will be valid for domain and its subdomains. + See defaultCertificate. \n * The value is published to individual + Route statuses so that end-users know where to target external DNS + records. \n domain must be unique among all IngressControllers, and + cannot be updated. \n If empty, defaults to ingress.config.openshift.io/cluster + .spec.domain." + type: string + endpointPublishingStrategy: + description: "endpointPublishingStrategy is used to publish the ingress + controller endpoints to other networks, enable load balancer integrations, + etc. 
\n If unset, the default is based on infrastructure.config.openshift.io/cluster + .status.platform: \n   AWS:      LoadBalancerService (with External + scope) Azure:    LoadBalancerService (with External scope) GCP: + \     LoadBalancerService (with External scope) IBMCloud: LoadBalancerService + (with External scope) Libvirt:  HostNetwork \n Any other platform + types (including None) default to HostNetwork. \n endpointPublishingStrategy + cannot be updated." + type: object + required: + - type + properties: + hostNetwork: + description: hostNetwork holds parameters for the HostNetwork endpoint + publishing strategy. Present only if type is HostNetwork. + type: object + loadBalancer: + description: loadBalancer holds parameters for the load balancer. + Present only if type is LoadBalancerService. + type: object + required: + - scope + properties: + scope: + description: scope indicates the scope at which the load balancer + is exposed. Possible values are "External" and "Internal". + type: string + enum: + - Internal + - External + nodePort: + description: nodePort holds parameters for the NodePortService endpoint + publishing strategy. Present only if type is NodePortService. + type: object + private: + description: private holds parameters for the Private endpoint publishing + strategy. Present only if type is Private. + type: object + type: + description: "type is the publishing strategy to use. Valid values + are: \n * LoadBalancerService \n Publishes the ingress controller + using a Kubernetes LoadBalancer Service. \n In this configuration, + the ingress controller deployment uses container networking. A + LoadBalancer Service is created to publish the deployment. \n + See: https://kubernetes.io/docs/concepts/services-networking/service/#loadbalancer + \n If domain is set, a wildcard DNS record will be managed to + point at the LoadBalancer Service's external name. DNS records + are managed only in DNS zones defined by dns.config.openshift.io/cluster + .spec.publicZone and .spec.privateZone. \n Wildcard DNS management + is currently supported only on the AWS, Azure, and GCP platforms. + \n * HostNetwork \n Publishes the ingress controller on node ports + where the ingress controller is deployed. \n In this configuration, + the ingress controller deployment uses host networking, bound + to node ports 80 and 443. The user is responsible for configuring + an external load balancer to publish the ingress controller via + the node ports. \n * Private \n Does not publish the ingress controller. + \n In this configuration, the ingress controller deployment uses + container networking, and is not explicitly published. The user + must manually publish the ingress controller. \n * NodePortService + \n Publishes the ingress controller using a Kubernetes NodePort + Service. \n In this configuration, the ingress controller deployment + uses container networking. A NodePort Service is created to publish + the deployment. The specific node ports are dynamically allocated + by OpenShift; however, to support static port allocations, user + changes to the node port field of the managed NodePort Service + will be preserved." + type: string + enum: + - LoadBalancerService + - HostNetwork + - Private + - NodePortService + namespaceSelector: + description: "namespaceSelector is used to filter the set of namespaces + serviced by the ingress controller. This is useful for implementing + shards. \n If unset, the default is no filtering."
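+ # Illustrative sketch (assumed values): an internal LoadBalancerService + # strategy in the shape this schema admits; type is required, and scope is + # required whenever loadBalancer is set: + # endpointPublishingStrategy: + #   type: LoadBalancerService + #   loadBalancer: + #     scope: Internal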
+ type: object + properties: + matchExpressions: + description: matchExpressions is a list of label selector requirements. + The requirements are ANDed. + type: array + items: + description: A label selector requirement is a selector that contains + values, a key, and an operator that relates the key and values. + type: object + required: + - key + - operator + properties: + key: + description: key is the label key that the selector applies + to. + type: string + operator: + description: operator represents a key's relationship to a + set of values. Valid operators are In, NotIn, Exists and + DoesNotExist. + type: string + values: + description: values is an array of string values. If the operator + is In or NotIn, the values array must be non-empty. If the + operator is Exists or DoesNotExist, the values array must + be empty. This array is replaced during a strategic merge + patch. + type: array + items: + type: string + matchLabels: + description: matchLabels is a map of {key,value} pairs. A single + {key,value} in the matchLabels map is equivalent to an element + of matchExpressions, whose key field is "key", the operator is + "In", and the values array contains only "value". The requirements + are ANDed. + type: object + additionalProperties: + type: string + nodePlacement: + description: "nodePlacement enables explicit control over the scheduling + of the ingress controller. \n If unset, defaults are used. See NodePlacement + for more details." + type: object + properties: + nodeSelector: + description: "nodeSelector is the node selector applied to ingress + controller deployments. \n If unset, the default is: \n beta.kubernetes.io/os: + linux node-role.kubernetes.io/worker: '' \n If set, the specified + selector is used and replaces the default." + type: object + properties: + matchExpressions: + description: matchExpressions is a list of label selector requirements. + The requirements are ANDed. + type: array + items: + description: A label selector requirement is a selector that + contains values, a key, and an operator that relates the + key and values. + type: object + required: + - key + - operator + properties: + key: + description: key is the label key that the selector applies + to. + type: string + operator: + description: operator represents a key's relationship + to a set of values. Valid operators are In, NotIn, Exists + and DoesNotExist. + type: string + values: + description: values is an array of string values. If the + operator is In or NotIn, the values array must be non-empty. + If the operator is Exists or DoesNotExist, the values + array must be empty. This array is replaced during a + strategic merge patch. + type: array + items: + type: string + matchLabels: + description: matchLabels is a map of {key,value} pairs. A single + {key,value} in the matchLabels map is equivalent to an element + of matchExpressions, whose key field is "key", the operator + is "In", and the values array contains only "value". The requirements + are ANDed. + type: object + additionalProperties: + type: string + tolerations: + description: "tolerations is a list of tolerations applied to ingress + controller deployments. \n The default is an empty list. \n See + https://kubernetes.io/docs/concepts/configuration/taint-and-toleration/" + type: array + items: + description: The pod this Toleration is attached to tolerates + any taint that matches the triple <key,value,effect> using the + matching operator <operator>. 
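+ # Illustrative sketch (the label and taint names are assumptions): pinning + # the ingress controller to dedicated nodes via the nodePlacement fields + # described above: + # nodePlacement: + #   nodeSelector: + #     matchLabels: + #       node-role.kubernetes.io/infra: '' + #   tolerations: + #   - key: node-role.kubernetes.io/infra + #     operator: Exists + #     effect: NoSchedule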
+ type: object + properties: + effect: + description: Effect indicates the taint effect to match. Empty + means match all taint effects. When specified, allowed values + are NoSchedule, PreferNoSchedule and NoExecute. + type: string + key: + description: Key is the taint key that the toleration applies + to. Empty means match all taint keys. If the key is empty, + operator must be Exists; this combination means to match + all values and all keys. + type: string + operator: + description: Operator represents a key's relationship to the + value. Valid operators are Exists and Equal. Defaults to + Equal. Exists is equivalent to wildcard for value, so that + a pod can tolerate all taints of a particular category. + type: string + tolerationSeconds: + description: TolerationSeconds represents the period of time + the toleration (which must be of effect NoExecute, otherwise + this field is ignored) tolerates the taint. By default, + it is not set, which means tolerate the taint forever (do + not evict). Zero and negative values will be treated as + 0 (evict immediately) by the system. + type: integer + format: int64 + value: + description: Value is the taint value the toleration matches + to. If the operator is Exists, the value should be empty, + otherwise just a regular string. + type: string + replicas: + description: replicas is the desired number of ingress controller replicas. + If unset, defaults to 2. + type: integer + format: int32 + routeAdmission: + description: "routeAdmission defines a policy for handling new route + claims (for example, to allow or deny claims across namespaces). \n + If empty, defaults will be applied. See specific routeAdmission fields + for details about their defaults." + type: object + properties: + namespaceOwnership: + description: "namespaceOwnership describes how host name claims + across namespaces should be handled. \n Value must be one of: + \n - Strict: Do not allow routes in different namespaces to claim + the same host. \n - InterNamespaceAllowed: Allow routes to claim + different paths of the same host name across namespaces. \n + If empty, the default is Strict." + type: string + enum: + - InterNamespaceAllowed + - Strict + routeSelector: + description: "routeSelector is used to filter the set of Routes serviced + by the ingress controller. This is useful for implementing shards. + \n If unset, the default is no filtering." + type: object + properties: + matchExpressions: + description: matchExpressions is a list of label selector requirements. + The requirements are ANDed. + type: array + items: + description: A label selector requirement is a selector that contains + values, a key, and an operator that relates the key and values. + type: object + required: + - key + - operator + properties: + key: + description: key is the label key that the selector applies + to. + type: string + operator: + description: operator represents a key's relationship to a + set of values. Valid operators are In, NotIn, Exists and + DoesNotExist. + type: string + values: + description: values is an array of string values. If the operator + is In or NotIn, the values array must be non-empty. If the + operator is Exists or DoesNotExist, the values array must + be empty. This array is replaced during a strategic merge + patch. + type: array + items: + type: string + matchLabels: + description: matchLabels is a map of {key,value} pairs. 
A single + {key,value} in the matchLabels map is equivalent to an element + of matchExpressions, whose key field is "key", the operator is + "In", and the values array contains only "value". The requirements + are ANDed. + type: object + additionalProperties: + type: string + tlsSecurityProfile: + description: "tlsSecurityProfile specifies settings for TLS connections + for ingresscontrollers. \n If unset, the default is based on the apiservers.config.openshift.io/cluster + resource. \n Note that when using the Old, Intermediate, and Modern + profile types, the effective profile configuration is subject to change + between releases. For example, given a specification to use the Intermediate + profile deployed on release X.Y.Z, an upgrade to release X.Y.Z+1 may + cause a new profile configuration to be applied to the ingress controller, + resulting in a rollout. \n Note that the minimum TLS version for ingress + controllers is 1.1, and the maximum TLS version is 1.2. An implication + of this restriction is that the Modern TLS profile type cannot be + used because it requires TLS 1.3." + type: object + properties: + custom: + description: "custom is a user-defined TLS security profile. Be + extremely careful using a custom profile as invalid configurations + can be catastrophic. An example custom profile looks like this: + \n ciphers: - ECDHE-ECDSA-CHACHA20-POLY1305 - ECDHE-RSA-CHACHA20-POLY1305 + \ - ECDHE-RSA-AES128-GCM-SHA256 - ECDHE-ECDSA-AES128-GCM-SHA256 + \ minTLSVersion: TLSv1.1" + type: object + properties: + ciphers: + description: "ciphers is used to specify the cipher algorithms + that are negotiated during the TLS handshake. Operators may + remove entries their operands do not support. For example, + to use DES-CBC3-SHA (yaml): \n ciphers: - DES-CBC3-SHA" + type: array + items: + type: string + minTLSVersion: + description: "minTLSVersion is used to specify the minimal version + of the TLS protocol that is negotiated during the TLS handshake. + For example, to use TLS versions 1.1, 1.2 and 1.3 (yaml): + \n minTLSVersion: TLSv1.1 \n NOTE: currently the highest + minTLSVersion allowed is VersionTLS12" + type: string + enum: + - VersionTLS10 + - VersionTLS11 + - VersionTLS12 + - VersionTLS13 + nullable: true + intermediate: + description: "intermediate is a TLS security profile based on: \n + https://wiki.mozilla.org/Security/Server_Side_TLS#Intermediate_compatibility_.28recommended.29 + \n and looks like this (yaml): \n ciphers: - TLS_AES_128_GCM_SHA256 + \ - TLS_AES_256_GCM_SHA384 - TLS_CHACHA20_POLY1305_SHA256 + \ - ECDHE-ECDSA-AES128-GCM-SHA256 - ECDHE-RSA-AES128-GCM-SHA256 + \ - ECDHE-ECDSA-AES256-GCM-SHA384 - ECDHE-RSA-AES256-GCM-SHA384 + \ - ECDHE-ECDSA-CHACHA20-POLY1305 - ECDHE-RSA-CHACHA20-POLY1305 + \ - DHE-RSA-AES128-GCM-SHA256 - DHE-RSA-AES256-GCM-SHA384 + \ minTLSVersion: TLSv1.2" + type: object + nullable: true + modern: + description: "modern is a TLS security profile based on: \n https://wiki.mozilla.org/Security/Server_Side_TLS#Modern_compatibility + \n and looks like this (yaml): \n ciphers: - TLS_AES_128_GCM_SHA256 + \ - TLS_AES_256_GCM_SHA384 - TLS_CHACHA20_POLY1305_SHA256 + \ minTLSVersion: TLSv1.3 \n NOTE: Currently unsupported." 
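+ # Illustrative sketch drawn from the custom-profile example in the + # description above; VersionTLS10..VersionTLS13 are the enum values the + # schema admits for minTLSVersion: + # tlsSecurityProfile: + #   type: Custom + #   custom: + #     ciphers: + #     - ECDHE-ECDSA-CHACHA20-POLY1305 + #     - ECDHE-RSA-CHACHA20-POLY1305 + #     - ECDHE-RSA-AES128-GCM-SHA256 + #     - ECDHE-ECDSA-AES128-GCM-SHA256 + #     minTLSVersion: VersionTLS11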
+ type: object + nullable: true + old: + description: "old is a TLS security profile based on: \n https://wiki.mozilla.org/Security/Server_Side_TLS#Old_backward_compatibility + \n and looks like this (yaml): \n   ciphers:     - TLS_AES_128_GCM_SHA256 + \    - TLS_AES_256_GCM_SHA384     - TLS_CHACHA20_POLY1305_SHA256 + \    - ECDHE-ECDSA-AES128-GCM-SHA256     - ECDHE-RSA-AES128-GCM-SHA256 + \    - ECDHE-ECDSA-AES256-GCM-SHA384     - ECDHE-RSA-AES256-GCM-SHA384 + \    - ECDHE-ECDSA-CHACHA20-POLY1305     - ECDHE-RSA-CHACHA20-POLY1305 + \    - DHE-RSA-AES128-GCM-SHA256     - DHE-RSA-AES256-GCM-SHA384 + \    - DHE-RSA-CHACHA20-POLY1305     - ECDHE-ECDSA-AES128-SHA256 + \    - ECDHE-RSA-AES128-SHA256     - ECDHE-ECDSA-AES128-SHA     - + ECDHE-RSA-AES128-SHA     - ECDHE-ECDSA-AES256-SHA384     - ECDHE-RSA-AES256-SHA384 + \    - ECDHE-ECDSA-AES256-SHA     - ECDHE-RSA-AES256-SHA     - + DHE-RSA-AES128-SHA256     - DHE-RSA-AES256-SHA256     - AES128-GCM-SHA256 + \    - AES256-GCM-SHA384     - AES128-SHA256     - AES256-SHA256 + \    - AES128-SHA     - AES256-SHA     - DES-CBC3-SHA   minTLSVersion: + TLSv1.0" + type: object + nullable: true + type: + description: "type is one of Old, Intermediate, Modern or Custom. + Custom provides the ability to specify individual TLS security + profile parameters. Old, Intermediate and Modern are TLS security + profiles based on: \n https://wiki.mozilla.org/Security/Server_Side_TLS#Recommended_configurations + \n The profiles are intent based, so they may change over time + as new ciphers are developed and existing ciphers are found to + be insecure. Depending on precisely which ciphers are available + to a process, the list may be reduced. \n Note that the Modern + profile is currently not supported because it is not yet well + adopted by common software libraries." + type: string + enum: + - Old + - Intermediate + - Modern + - Custom + status: + description: status is the most recently observed status of the IngressController. + type: object + properties: + availableReplicas: + description: availableReplicas is the number of observed available replicas + according to the ingress controller deployment. + type: integer + format: int32 + conditions: + description: "conditions is a list of conditions and their status. \n + Available means the ingress controller deployment is available and + servicing route and ingress resources (i.e., .status.availableReplicas + equals .spec.replicas) \n There are additional conditions which indicate + the status of other ingress controller features and capabilities. + \n   * LoadBalancerManaged   - True if the following conditions are + met:     * The endpoint publishing strategy requires a service load + balancer.   - False if any of those conditions are unsatisfied. \n + \  * LoadBalancerReady   - True if the following conditions are met: + \    * A load balancer is managed.     * The load balancer is ready. + \  - False if any of those conditions are unsatisfied. \n   * DNSManaged + \  - True if the following conditions are met:     * The endpoint + publishing strategy and platform support DNS.     * The ingress controller + domain is set.     * dns.config.openshift.io/cluster configures DNS + zones.   - False if any of those conditions are unsatisfied. \n   * + DNSReady   - True if the following conditions are met:     * DNS is + managed.     * DNS records have been successfully created.   - False + if any of those conditions are unsatisfied." + type: array + items: + description: OperatorCondition is just the standard condition fields.
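+ # Illustrative sketch (assumed values): a single status condition entry in + # the shape the schema above defines: + # conditions: + # - type: Available + #   status: "True" + #   reason: AsExpected + #   message: The ingress controller deployment is available. + #   lastTransitionTime: "2020-01-01T00:00:00Z"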
+ type: object + properties: + lastTransitionTime: + type: string + format: date-time + message: + type: string + reason: + type: string + status: + type: string + type: + type: string + domain: + description: domain is the actual domain in use. + type: string + endpointPublishingStrategy: + description: endpointPublishingStrategy is the actual strategy in use. + type: object + required: + - type + properties: + hostNetwork: + description: hostNetwork holds parameters for the HostNetwork endpoint + publishing strategy. Present only if type is HostNetwork. + type: object + loadBalancer: + description: loadBalancer holds parameters for the load balancer. + Present only if type is LoadBalancerService. + type: object + required: + - scope + properties: + scope: + description: scope indicates the scope at which the load balancer + is exposed. Possible values are "External" and "Internal". + type: string + enum: + - Internal + - External + nodePort: + description: nodePort holds parameters for the NodePortService endpoint + publishing strategy. Present only if type is NodePortService. + type: object + private: + description: private holds parameters for the Private endpoint publishing + strategy. Present only if type is Private. + type: object + type: + description: "type is the publishing strategy to use. Valid values + are: \n * LoadBalancerService \n Publishes the ingress controller + using a Kubernetes LoadBalancer Service. \n In this configuration, + the ingress controller deployment uses container networking. A + LoadBalancer Service is created to publish the deployment. \n + See: https://kubernetes.io/docs/concepts/services-networking/service/#loadbalancer + \n If domain is set, a wildcard DNS record will be managed to + point at the LoadBalancer Service's external name. DNS records + are managed only in DNS zones defined by dns.config.openshift.io/cluster + .spec.publicZone and .spec.privateZone. \n Wildcard DNS management + is currently supported only on the AWS, Azure, and GCP platforms. + \n * HostNetwork \n Publishes the ingress controller on node ports + where the ingress controller is deployed. \n In this configuration, + the ingress controller deployment uses host networking, bound + to node ports 80 and 443. The user is responsible for configuring + an external load balancer to publish the ingress controller via + the node ports. \n * Private \n Does not publish the ingress controller. + \n In this configuration, the ingress controller deployment uses + container networking, and is not explicitly published. The user + must manually publish the ingress controller. \n * NodePortService + \n Publishes the ingress controller using a Kubernetes NodePort + Service. \n In this configuration, the ingress controller deployment + uses container networking. A NodePort Service is created to publish + the deployment. The specific node ports are dynamically allocated + by OpenShift; however, to support static port allocations, user + changes to the node port field of the managed NodePort Service + will be preserved." + type: string + enum: + - LoadBalancerService + - HostNetwork + - Private + - NodePortService + observedGeneration: + description: observedGeneration is the most recent generation observed. + type: integer + format: int64 + selector: + description: selector is a label selector, in string format, for ingress + controller pods corresponding to the IngressController. The number + of matching pods should equal the value of availableReplicas.
+ type: string + tlsProfile: + description: tlsProfile is the TLS connection configuration that is + in effect. + type: object + properties: + ciphers: + description: "ciphers is used to specify the cipher algorithms that + are negotiated during the TLS handshake. Operators may remove + entries their operands do not support. For example, to use DES-CBC3-SHA + \ (yaml): \n ciphers: - DES-CBC3-SHA" + type: array + items: + type: string + minTLSVersion: + description: "minTLSVersion is used to specify the minimal version + of the TLS protocol that is negotiated during the TLS handshake. + For example, to use TLS versions 1.1, 1.2 and 1.3 (yaml): \n minTLSVersion: + TLSv1.1 \n NOTE: currently the highest minTLSVersion allowed is + VersionTLS12" + type: string + enum: + - VersionTLS10 + - VersionTLS11 + - VersionTLS12 + - VersionTLS13 + version: v1 + versions: + - name: v1 + served: true + storage: true +status: + acceptedNames: + kind: "" + plural: "" + conditions: [] + storedVersions: [] diff --git a/vendor/github.com/openshift/api/operator/v1/0000_50_service-ca-operator_02_crd.yaml b/vendor/github.com/openshift/api/operator/v1/0000_50_service-ca-operator_02_crd.yaml new file mode 100644 index 0000000000000000000000000000000000000000..c3349ff9706f0544dbb87b1c35c53ec2c4663730 --- /dev/null +++ b/vendor/github.com/openshift/api/operator/v1/0000_50_service-ca-operator_02_crd.yaml @@ -0,0 +1,138 @@ +apiVersion: apiextensions.k8s.io/v1beta1 +kind: CustomResourceDefinition +metadata: + name: servicecas.operator.openshift.io +spec: + scope: Cluster + preserveUnknownFields: false + group: operator.openshift.io + version: v1 + names: + kind: ServiceCA + plural: servicecas + singular: serviceca + subresources: + status: {} + validation: + openAPIV3Schema: + description: ServiceCA provides information to configure an operator to manage + the service cert controllers + type: object + required: + - spec + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation + of an object. Servers should convert recognized schemas to the latest + internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource this + object represents. Servers may infer this from the endpoint the client + submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + metadata: + type: object + spec: + description: spec holds user settable values for configuration + type: object + properties: + logLevel: + description: logLevel is an intent based logging for an overall component. It + does not give fine grained control, but it is a simple way to manage + coarse grained logging choices that operators have to interpret for + their operands. + type: string + managementState: + description: managementState indicates whether and how the operator + should manage the component + type: string + pattern: ^(Managed|Unmanaged|Force|Removed)$ + observedConfig: + description: observedConfig holds a sparse config that controller has + observed from the cluster state. 
It exists in spec because it is + an input to the level for the operator + type: object + nullable: true + x-kubernetes-preserve-unknown-fields: true + operatorLogLevel: + description: operatorLogLevel is an intent based logging for the operator + itself. It does not give fine grained control, but it is a simple + way to manage coarse grained logging choices that operators have to + interpret for themselves. + type: string + unsupportedConfigOverrides: + description: 'unsupportedConfigOverrides holds a sparse config that + will override any previously set options. It only needs to be the + fields to override it will end up overlaying in the following order: + 1. hardcoded defaults 2. observedConfig 3. unsupportedConfigOverrides' + type: object + nullable: true + x-kubernetes-preserve-unknown-fields: true + status: + description: status holds observed values from the cluster. They may not + be overridden. + type: object + properties: + conditions: + description: conditions is a list of conditions and their status + type: array + items: + description: OperatorCondition is just the standard condition fields. + type: object + properties: + lastTransitionTime: + type: string + format: date-time + message: + type: string + reason: + type: string + status: + type: string + type: + type: string + generations: + description: generations are used to determine when an item needs to + be reconciled or has changed in a way that needs a reaction. + type: array + items: + description: GenerationStatus keeps track of the generation for a + given resource so that decisions about forced updates can be made. + type: object + properties: + group: + description: group is the group of the thing you're tracking + type: string + hash: + description: hash is an optional field set for resources without + generation that are content sensitive like secrets and configmaps + type: string + lastGeneration: + description: lastGeneration is the last generation of the workload + controller involved + type: integer + format: int64 + name: + description: name is the name of the thing you're tracking + type: string + namespace: + description: namespace is where the thing you're tracking is + type: string + resource: + description: resource is the resource type of the thing you're + tracking + type: string + observedGeneration: + description: observedGeneration is the last generation change you've + dealt with + type: integer + format: int64 + readyReplicas: + description: readyReplicas indicates how many replicas are ready and + at the desired state + type: integer + format: int32 + version: + description: version is the level this availability applies to + type: string diff --git a/vendor/github.com/openshift/api/operator/v1/0000_70_cluster-network-operator_01_crd.yaml b/vendor/github.com/openshift/api/operator/v1/0000_70_cluster-network-operator_01_crd.yaml new file mode 100644 index 0000000000000000000000000000000000000000..61170cbb9377adc6de5f1c84aac9dc0015ba74d6 --- /dev/null +++ b/vendor/github.com/openshift/api/operator/v1/0000_70_cluster-network-operator_01_crd.yaml @@ -0,0 +1,371 @@ +apiVersion: apiextensions.k8s.io/v1beta1 +kind: CustomResourceDefinition +metadata: + name: networks.operator.openshift.io +spec: + group: operator.openshift.io + names: + kind: Network + listKind: NetworkList + plural: networks + singular: network + scope: Cluster + preserveUnknownFields: false + versions: + - name: v1 + served: true + storage: true + validation: + # Ensure we will be able to deserialize the object into the golang type 
+ openAPIV3Schema: + description: Network describes the cluster's desired network configuration. + It is consumed by the cluster-network-operator. + type: object + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation + of an object. Servers should convert recognized schemas to the latest + internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource this + object represents. Servers may infer this from the endpoint the client + submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + metadata: + type: object + spec: + description: NetworkSpec is the top-level network configuration object. + type: object + properties: + additionalNetworks: + description: additionalNetworks is a list of extra networks to make + available to pods when multiple networks are enabled. + type: array + items: + description: AdditionalNetworkDefinition configures an extra network + that is available but not created by default. Instead, pods must + request them by name. type must be specified, along with exactly + one "Config" that matches the type. + type: object + properties: + name: + description: name is the name of the network. This will be populated + in the resulting CRD. This must be unique. + type: string + namespace: + description: namespace is the namespace of the network. This will + be populated in the resulting CRD. If not given, the network will + be created in the default namespace. + type: string + rawCNIConfig: + description: rawCNIConfig is the raw CNI configuration json to + create in the NetworkAttachmentDefinition CRD + type: string + simpleMacvlanConfig: + description: SimpleMacvlanConfig configures the macvlan interface + in case of type:NetworkTypeSimpleMacvlan + type: object + properties: + ipamConfig: + description: IPAMConfig configures the IPAM module that will be used + for IP Address Management (IPAM).
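+ # Illustrative sketch (the name and CNI config are assumptions): an + # additionalNetworks entry of the raw type, where rawCNIConfig carries the + # CNI JSON verbatim into the resulting NetworkAttachmentDefinition: + # additionalNetworks: + # - name: example-net + #   namespace: default + #   type: Raw + #   rawCNIConfig: '{ "cniVersion": "0.3.1", "type": "macvlan", "master": "eth1" }'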
+ type: object + properties: + staticIPAMConfig: + description: StaticIPAMConfig configures the static IP + address in case of type:IPAMTypeStatic + type: object + properties: + addresses: + description: Addresses configures IP addresses for the + interface + type: array + items: + description: StaticIPAMAddresses provides IP address + and Gateway for static IPAM addresses + type: object + properties: + address: + description: Address is the IP address in CIDR + format + type: string + gateway: + description: Gateway is an IP inside of the subnet + to designate as the gateway + type: string + dns: + description: DNS configures DNS for the interface + type: object + properties: + domain: + description: Domain configures the local domain + used for short hostname lookups + type: string + nameservers: + description: Nameservers points to DNS servers for + IP lookup + type: array + items: + type: string + search: + description: Search configures priority ordered + search domains for short hostname lookups + type: array + items: + type: string + routes: + description: Routes configures IP routes for the interface + type: array + items: + description: StaticIPAMRoutes provides Destination/Gateway + pairs for static IPAM routes + type: object + properties: + destination: + description: Destination is the IP route + destination + type: string + gateway: + description: Gateway is the route's next-hop + IP address. If unset, a default gateway is + assumed (as determined by the CNI plugin). + type: string + type: + description: Type is the type of IPAM module that will be used + for IP Address Management (IPAM). The supported values + are IPAMTypeDHCP, IPAMTypeStatic + type: string + master: + description: master is the host interface to create the macvlan + interface from. If not specified, it will be the default route + interface + type: string + mode: + description: 'mode is the macvlan mode: bridge, private, vepa, + passthru. The default is bridge' + type: string + mtu: + description: mtu is the mtu to use for the macvlan interface. + If unset, the host's kernel will select the value. + type: integer + format: int32 + minimum: 0 + type: + description: type is the type of network. The supported values + are NetworkTypeRaw, NetworkTypeSimpleMacvlan + type: string + clusterNetwork: + description: clusterNetwork is the IP address pool to use for pod IPs. + Some network providers, e.g. OpenShift SDN, support multiple ClusterNetworks. + Others only support one. This is equivalent to the cluster-cidr. + type: array + items: + description: ClusterNetworkEntry is a subnet from which to allocate + PodIPs. A network of size HostPrefix (in CIDR notation) will be + allocated when nodes join the cluster. Not all network providers + support multiple ClusterNetworks. + type: object + properties: + cidr: + type: string + hostPrefix: + type: integer + format: int32 + minimum: 0 + defaultNetwork: + description: defaultNetwork is the "default" network that all pods will + receive + type: object + properties: + kuryrConfig: + description: KuryrConfig configures the kuryr plugin + type: object + properties: + controllerProbesPort: + description: The port on which kuryr-controller will listen for readiness + and liveness requests. + type: integer + format: int32 + minimum: 0 + daemonProbesPort: + description: The port on which kuryr-daemon will listen for readiness + and liveness requests.
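+ # Illustrative sketch (the addresses are assumptions): a static IPAM block + # in the shape described above, with one address, a default route, and DNS + # settings: + # ipamConfig: + #   type: Static + #   staticIPAMConfig: + #     addresses: + #     - address: 192.168.1.10/24 + #       gateway: 192.168.1.1 + #     routes: + #     - destination: 0.0.0.0/0 + #     dns: + #       nameservers: + #       - 192.168.1.1 + #       domain: example.internal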
+ type: integer + format: int32 + minimum: 0 + enablePortPoolsPrepopulation: + description: enablePortPoolsPrepopulation when true will make + Kuryr prepopulate each newly created port pool with a minimum + number of ports. Kuryr uses Neutron port pooling to work around + the fact that it takes a significant amount of time to create + one. Instead of creating a port when a pod is being deployed, Kuryr + keeps a number of ports ready to be attached to pods. By default, + port prepopulation is disabled. + type: boolean + openStackServiceNetwork: + description: openStackServiceNetwork contains the CIDR of the network + from which to allocate IPs for OpenStack Octavia's Amphora + VMs. Please note that with the Amphora driver Octavia uses two + IPs from that network for each loadbalancer - one given by + OpenShift and a second for VRRP connections. As the first one + is managed by OpenShift's IPAM and the second by Neutron's, those + need to come from different pools. Therefore `openStackServiceNetwork` + needs to be at least twice the size of `serviceNetwork`, and the + whole `serviceNetwork` must overlap with `openStackServiceNetwork`. + cluster-network-operator will then make sure VRRP IPs are + taken from the ranges inside `openStackServiceNetwork` that + are not overlapping with `serviceNetwork`, effectively preventing + conflicts. If not set, cluster-network-operator will use `serviceNetwork` + expanded by decrementing the prefix size by 1. + type: string + poolBatchPorts: + description: poolBatchPorts sets the number of ports that should + be created in a single batch request to extend the port pool. + The default is 3. For more information about port pools see the + enablePortPoolsPrepopulation setting. + type: integer + minimum: 0 + poolMaxPorts: + description: poolMaxPorts sets the maximum number of free ports + that are being kept in a port pool. If the number of ports + exceeds this setting, free ports will get deleted. Setting + 0 will disable this upper bound, effectively preventing pools + from shrinking; this is the default value. For more information + about port pools see the enablePortPoolsPrepopulation setting. + type: integer + minimum: 0 + poolMinPorts: + description: poolMinPorts sets the minimum number of free ports + that should be kept in a port pool. If the number of ports + is lower than this setting, new ports will get created and + added to the pool. The default is 1. For more information about + port pools see the enablePortPoolsPrepopulation setting. + type: integer + minimum: 1 + openshiftSDNConfig: + description: openShiftSDNConfig configures the openshift-sdn plugin + type: object + properties: + enableUnidling: + description: enableUnidling controls whether or not the service + proxy will support idling and unidling of services. By default, + unidling is enabled. + type: boolean + mode: + description: mode is one of "Multitenant", "Subnet", or "NetworkPolicy" + type: string + mtu: + description: mtu is the mtu to use for the tunnel interface. + Defaults to 1450 if unset. This must be 50 bytes smaller than + the machine's uplink. + type: integer + format: int32 + minimum: 0 + useExternalOpenvswitch: + description: useExternalOpenvswitch tells the operator not to + install openvswitch, because it will be provided separately. + If set, you must provide it yourself. + type: boolean + vxlanPort: + description: vxlanPort is the port to use for all vxlan packets. + The default is 4789.
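+ # Illustrative sketch (assumed values): selecting openshift-sdn as the + # default network using the fields described above; 1450 and 4789 restate + # the documented defaults: + # defaultNetwork: + #   type: OpenShiftSDN + #   openshiftSDNConfig: + #     mode: NetworkPolicy + #     mtu: 1450 + #     vxlanPort: 4789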
+ type: integer + format: int32 + minimum: 0 + ovnKubernetesConfig: + description: ovnKubernetesConfig configures the ovn-kubernetes plugin. + This is currently not implemented. + type: object + properties: + genevePort: + description: genevePort is the UDP port to be used by geneve + encapsulation. Default is 6081 + type: integer + format: int32 + minimum: 1 + hybridOverlayConfig: + description: HybridOverlayConfig configures an additional overlay + network for peers that are not using OVN. + type: object + properties: + hybridClusterNetwork: + description: HybridClusterNetwork defines a network space + given to nodes on an additional overlay network. + type: array + items: + description: ClusterNetworkEntry is a subnet from which + to allocate PodIPs. A network of size HostPrefix (in + CIDR notation) will be allocated when nodes join the + cluster. Not all network providers support multiple + ClusterNetworks. + type: object + properties: + cidr: + type: string + hostPrefix: + type: integer + format: int32 + minimum: 0 + mtu: + description: mtu is the MTU to use for the tunnel interface. + This must be 100 bytes smaller than the uplink mtu. Default + is 1400 + type: integer + format: int32 + minimum: 0 + type: + description: type is the type of network. All NetworkTypes are supported + except for NetworkTypeRaw + type: string + deployKubeProxy: + description: deployKubeProxy specifies whether or not a standalone kube-proxy + should be deployed by the operator. Some network providers include + kube-proxy or similar functionality. If unset, the plugin will attempt + to select the correct value, which is false when OpenShift SDN and + ovn-kubernetes are used and true otherwise. + type: boolean + disableMultiNetwork: + description: disableMultiNetwork specifies whether or not multiple pod + network support should be disabled. If unset, this property defaults + to 'false' and multiple network support is enabled. + type: boolean + kubeProxyConfig: + description: kubeProxyConfig lets us configure the desired proxy configuration. + If not specified, sensible defaults will be chosen by OpenShift directly. + Not consumed by all network providers - currently only openshift-sdn. + type: object + properties: + bindAddress: + description: The address to "bind" on. Defaults to 0.0.0.0 + type: string + iptablesSyncPeriod: + description: 'The period that iptables rules are refreshed. Default: + 30s' + type: string + proxyArguments: + description: Any additional arguments to pass to the kubeproxy process + type: object + additionalProperties: + description: ProxyArgumentList is a list of arguments to pass + to the kubeproxy process + type: array + items: + type: string + logLevel: + description: logLevel allows configuring the logging level of the components + deployed by the operator. Currently only Kuryr SDN is affected by + this setting. Please note that turning on extensive logging may affect + performance. The default value is "Normal". + type: string + serviceNetwork: + description: serviceNetwork is the IP address pool to use for Service + IPs. Currently, all existing network providers only support a single + value here, but this is an array to allow for growth. + type: array + items: + type: string + status: + description: NetworkStatus is currently unused. Instead, status is reported + in the Network.config.openshift.io object.
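+ # Illustrative sketch (the CIDRs are assumptions): a minimal Network custom + # resource pulling the spec fields above together; the conventional + # singleton is named 'cluster': + # apiVersion: operator.openshift.io/v1 + # kind: Network + # metadata: + #   name: cluster + # spec: + #   clusterNetwork: + #   - cidr: 10.128.0.0/14 + #     hostPrefix: 23 + #   serviceNetwork: + #   - 172.30.0.0/16 + #   defaultNetwork: + #     type: OpenShiftSDN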
+ type: object diff --git a/vendor/github.com/openshift/api/operator/v1/0000_70_console-operator.crd.yaml b/vendor/github.com/openshift/api/operator/v1/0000_70_console-operator.crd.yaml new file mode 100644 index 0000000000000000000000000000000000000000..e227469c9745422a396226986d44636e2a326f21 --- /dev/null +++ b/vendor/github.com/openshift/api/operator/v1/0000_70_console-operator.crd.yaml @@ -0,0 +1,199 @@ +apiVersion: apiextensions.k8s.io/v1beta1 +kind: CustomResourceDefinition +metadata: + name: consoles.operator.openshift.io +spec: + scope: Cluster + preserveUnknownFields: false + group: operator.openshift.io + names: + kind: Console + listKind: ConsoleList + plural: consoles + singular: console + subresources: + status: {} + versions: + - name: v1 + served: true + storage: true + validation: + openAPIV3Schema: + description: Console provides a means to configure an operator to manage the + console. + type: object + required: + - spec + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation + of an object. Servers should convert recognized schemas to the latest + internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource this + object represents. Servers may infer this from the endpoint the client + submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + metadata: + type: object + spec: + description: ConsoleSpec is the specification of the desired behavior of + the Console. + type: object + properties: + customization: + description: customization is used to optionally provide a small set + of customization options to the web console. + type: object + properties: + brand: + description: brand is the default branding of the web console, which + can be overridden by providing the brand field. There is a limited + set of specific brand options. This field controls elements of + the console such as the logo. An invalid value will prevent a console + rollout. + type: string + pattern: ^$|^(ocp|origin|okd|dedicated|online|azure)$ + customLogoFile: + description: 'customLogoFile replaces the default OpenShift logo + in the masthead and about dialog. It is a reference to a ConfigMap + in the openshift-config namespace. This can be created with a + command like ''oc create configmap custom-logo --from-file=/path/to/file + -n openshift-config''. Image size must be less than 1 MB due to + constraints on the ConfigMap size. The ConfigMap key should include + a file extension so that the console serves the file with the + correct MIME type. Recommended logo specifications: Dimensions: + Max height of 68px and max width of 200px SVG format preferred' + type: object + properties: + key: + description: Key allows pointing to a specific key/value inside + of the configmap. This is useful for logical file references. + type: string + name: + type: string + customProductName: + description: customProductName is the name that will be displayed + in page titles, logo alt text, and the about dialog instead of + the normal OpenShift product name. + type: string + documentationBaseURL: + description: documentationBaseURL links to the external documentation + that is shown in various sections of the web console.
Providing documentationBaseURL + will override the default documentation URL. An invalid value will + prevent a console rollout. + type: string + pattern: ^$|^((https):\/\/?)[^\s()<>]+(?:\([\w\d]+\)|([^[:punct:]\s]|\/?))\/$ + logLevel: + description: logLevel is an intent based logging for an overall component. It + does not give fine grained control, but it is a simple way to manage + coarse grained logging choices that operators have to interpret for + their operands. + type: string + managementState: + description: managementState indicates whether and how the operator + should manage the component + type: string + pattern: ^(Managed|Unmanaged|Force|Removed)$ + observedConfig: + description: observedConfig holds a sparse config that controller has + observed from the cluster state. It exists in spec because it is + an input to the level for the operator + type: object + nullable: true + x-kubernetes-preserve-unknown-fields: true + operatorLogLevel: + description: operatorLogLevel is an intent based logging for the operator + itself. It does not give fine grained control, but it is a simple + way to manage coarse grained logging choices that operators have to + interpret for themselves. + type: string + providers: + description: providers contains configuration for using specific service + providers. + type: object + properties: + statuspage: + description: statuspage contains the ID for a statuspage.io page that + provides status info. + type: object + properties: + pageID: + description: pageID is the unique ID assigned by Statuspage + for your page. This must be a public page. + type: string + unsupportedConfigOverrides: + description: 'unsupportedConfigOverrides holds a sparse config that + will override any previously set options. It only needs to be the + fields to override it will end up overlaying in the following order: + 1. hardcoded defaults 2. observedConfig 3. unsupportedConfigOverrides' + type: object + nullable: true + x-kubernetes-preserve-unknown-fields: true + status: + description: ConsoleStatus defines the observed status of the Console. + type: object + properties: + conditions: + description: conditions is a list of conditions and their status + type: array + items: + description: OperatorCondition is just the standard condition fields. + type: object + properties: + lastTransitionTime: + type: string + format: date-time + message: + type: string + reason: + type: string + status: + type: string + type: + type: string + generations: + description: generations are used to determine when an item needs to + be reconciled or has changed in a way that needs a reaction. + type: array + items: + description: GenerationStatus keeps track of the generation for a + given resource so that decisions about forced updates can be made.
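+ # Illustrative sketch (the names and page ID are assumptions): a Console + # resource using the customization and providers fields described above: + # apiVersion: operator.openshift.io/v1 + # kind: Console + # metadata: + #   name: cluster + # spec: + #   managementState: Managed + #   customization: + #     brand: okd + #     customProductName: Example Console + #   providers: + #     statuspage: + #       pageID: abc123def456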
+ type: object + properties: + group: + description: group is the group of the thing you're tracking + type: string + hash: + description: hash is an optional field set for resources without + generation that are content sensitive like secrets and configmaps + type: string + lastGeneration: + description: lastGeneration is the last generation of the workload + controller involved + type: integer + format: int64 + name: + description: name is the name of the thing you're tracking + type: string + namespace: + description: namespace is where the thing you're tracking is + type: string + resource: + description: resource is the resource type of the thing you're + tracking + type: string + observedGeneration: + description: observedGeneration is the last generation change you've + dealt with + type: integer + format: int64 + readyReplicas: + description: readyReplicas indicates how many replicas are ready and + at the desired state + type: integer + format: int32 + version: + description: version is the level this availability applies to + type: string diff --git a/vendor/github.com/openshift/api/operator/v1/0000_70_dns-operator_00-custom-resource-definition.yaml b/vendor/github.com/openshift/api/operator/v1/0000_70_dns-operator_00-custom-resource-definition.yaml new file mode 100644 index 0000000000000000000000000000000000000000..e553f1f656f84c6a494d208d248d06229e14d623 --- /dev/null +++ b/vendor/github.com/openshift/api/operator/v1/0000_70_dns-operator_00-custom-resource-definition.yaml @@ -0,0 +1,142 @@ +apiVersion: apiextensions.k8s.io/v1beta1 +kind: CustomResourceDefinition +metadata: + creationTimestamp: null + name: dnses.operator.openshift.io +spec: + group: operator.openshift.io + names: + kind: DNS + listKind: DNSList + plural: dnses + singular: dns + scope: Cluster + preserveUnknownFields: false + subresources: + status: {} + validation: + openAPIV3Schema: + description: "DNS manages the CoreDNS component to provide a name resolution + service for pods and services in the cluster. \n This supports the DNS-based + service discovery specification: https://github.com/kubernetes/dns/blob/master/docs/specification.md + \n More details: https://kubernetes.io/docs/tasks/administer-cluster/coredns" + type: object + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation + of an object. Servers should convert recognized schemas to the latest + internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource this + object represents. Servers may infer this from the endpoint the client + submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + metadata: + type: object + spec: + description: spec is the specification of the desired behavior of the DNS. + type: object + properties: + servers: + description: "servers is a list of DNS resolvers that provide name query + delegation for one or more subdomains outside the scope of the cluster + domain. If servers consists of more than one Server, longest suffix + match will be used to determine the Server. 
\n For example, if there + are two Servers, one for \"foo.com\" and another for \"a.foo.com\", + and the name query is for \"www.a.foo.com\", it will be routed to + the Server with Zone \"a.foo.com\". \n If this field is nil, no servers + are created." + type: array + items: + description: Server defines the schema for a server that runs per + instance of CoreDNS. + type: object + properties: + forwardPlugin: + description: forwardPlugin defines a schema for configuring CoreDNS + to proxy DNS messages to upstream resolvers. + type: object + properties: + upstreams: + description: "upstreams is a list of resolvers to forward + name queries for subdomains of Zones. Upstreams are randomized + when more than 1 upstream is specified. Each instance of + CoreDNS performs health checking of Upstreams. When a healthy + upstream returns an error during the exchange, another resolver + is tried from Upstreams. Each upstream is represented by + an IP address or IP:port if the upstream listens on a port + other than 53. \n A maximum of 15 upstreams is allowed per + ForwardPlugin." + type: array + maxItems: 15 + items: + type: string + name: + description: name is required and specifies a unique name for + the server. Name must comply with the Service Name Syntax of + rfc6335. + type: string + zones: + description: zones is required and specifies the subdomains that + Server is authoritative for. Zones must conform to the rfc1123 + definition of a subdomain. Specifying the cluster domain (i.e., + "cluster.local") is invalid. + type: array + items: + type: string + status: + description: status is the most recently observed status of the DNS. + type: object + required: + - clusterDomain + - clusterIP + properties: + clusterDomain: + description: "clusterDomain is the local cluster DNS domain suffix for + DNS services. This will be a subdomain as defined in RFC 1034, section + 3.5: https://tools.ietf.org/html/rfc1034#section-3.5 Example: \"cluster.local\" + \n More info: https://kubernetes.io/docs/concepts/services-networking/dns-pod-service" + type: string + clusterIP: + description: "clusterIP is the service IP through which this DNS is + made available. \n In the case of the default DNS, this will be a + well known IP that is used as the default nameserver for pods that + are using the default ClusterFirst DNS policy. \n In general, this + IP can be specified in a pod's spec.dnsConfig.nameservers list or + used explicitly when performing name resolution from within the cluster. + Example: dig foo.com @<service IP> \n More info: https://kubernetes.io/docs/concepts/services-networking/service/#virtual-ips-and-service-proxies" + type: string + conditions: + description: "conditions provide information about the state of the + DNS on the cluster. \n These are the supported DNS conditions: \n + \ * Available - True if the following conditions are met: * + DNS controller daemonset is available. - False if any of those conditions + are unsatisfied." + type: array + items: + description: OperatorCondition is just the standard condition fields. 
+ type: object + properties: + lastTransitionTime: + type: string + format: date-time + message: + type: string + reason: + type: string + status: + type: string + type: + type: string + version: v1 + versions: + - name: v1 + served: true + storage: true +status: + acceptedNames: + kind: "" + plural: "" + conditions: [] + storedVersions: [] diff --git a/vendor/github.com/openshift/api/operator/v1/0000_80_csi_snapshot_controller_operator_01_crd.yaml b/vendor/github.com/openshift/api/operator/v1/0000_80_csi_snapshot_controller_operator_01_crd.yaml new file mode 100644 index 0000000000000000000000000000000000000000..d8cd7528bdc4f08605ccf3953897515400c4944a --- /dev/null +++ b/vendor/github.com/openshift/api/operator/v1/0000_80_csi_snapshot_controller_operator_01_crd.yaml @@ -0,0 +1,142 @@ +apiVersion: apiextensions.k8s.io/v1beta1 +kind: CustomResourceDefinition +metadata: + name: csisnapshotcontrollers.operator.openshift.io +spec: + group: operator.openshift.io + names: + kind: CSISnapshotController + plural: csisnapshotcontrollers + singular: csisnapshotcontroller + preserveUnknownFields: false + scope: Cluster + subresources: + status: {} + version: v1 + versions: + - name: v1 + served: true + storage: true + validation: + openAPIV3Schema: + description: CSISnapshotController provides a means to configure an operator + to manage the CSI snapshots. `cluster` is the canonical name. + type: object + required: + - spec + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation + of an object. Servers should convert recognized schemas to the latest + internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource this + object represents. Servers may infer this from the endpoint the client + submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + metadata: + type: object + spec: + description: spec holds user settable values for configuration + type: object + properties: + logLevel: + description: logLevel is an intent based logging for an overall component. It + does not give fine grained control, but it is a simple way to manage + coarse grained logging choices that operators have to interpret for + their operands. + type: string + managementState: + description: managementState indicates whether and how the operator + should manage the component + type: string + pattern: ^(Managed|Unmanaged|Force|Removed)$ + observedConfig: + description: observedConfig holds a sparse config that controller has + observed from the cluster state. It exists in spec because it is + an input to the level for the operator + type: object + nullable: true + x-kubernetes-preserve-unknown-fields: true + operatorLogLevel: + description: operatorLogLevel is an intent based logging for the operator + itself. It does not give fine grained control, but it is a simple + way to manage coarse grained logging choices that operators have to + interpret for themselves. + type: string + unsupportedConfigOverrides: + description: 'unsupportedConfigOverrides holds a sparse config that + will override any previously set options. It only needs to be the + fields to override it will end up overlaying in the following order: + 1. hardcoded defaults 2. observedConfig 3. 
unsupportedConfigOverrides' + type: object + nullable: true + x-kubernetes-preserve-unknown-fields: true + status: + description: status holds observed values from the cluster. They may not + be overridden. + type: object + properties: + conditions: + description: conditions is a list of conditions and their status + type: array + items: + description: OperatorCondition is just the standard condition fields. + type: object + properties: + lastTransitionTime: + type: string + format: date-time + message: + type: string + reason: + type: string + status: + type: string + type: + type: string + generations: + description: generations are used to determine when an item needs to + be reconciled or has changed in a way that needs a reaction. + type: array + items: + description: GenerationStatus keeps track of the generation for a + given resource so that decisions about forced updates can be made. + type: object + properties: + group: + description: group is the group of the thing you're tracking + type: string + hash: + description: hash is an optional field set for resources without + generation that are content sensitive like secrets and configmaps + type: string + lastGeneration: + description: lastGeneration is the last generation of the workload + controller involved + type: integer + format: int64 + name: + description: name is the name of the thing you're tracking + type: string + namespace: + description: namespace is where the thing you're tracking is + type: string + resource: + description: resource is the resource type of the thing you're + tracking + type: string + observedGeneration: + description: observedGeneration is the last generation change you've + dealt with + type: integer + format: int64 + readyReplicas: + description: readyReplicas indicates how many replicas are ready and + at the desired state + type: integer + format: int32 + version: + description: version is the level this availability applies to + type: string diff --git a/vendor/github.com/openshift/api/operator/v1/doc.go b/vendor/github.com/openshift/api/operator/v1/doc.go new file mode 100644 index 0000000000000000000000000000000000000000..3de961a7fc278a80abdf8729d144de1421b6315b --- /dev/null +++ b/vendor/github.com/openshift/api/operator/v1/doc.go @@ -0,0 +1,7 @@ +// +k8s:deepcopy-gen=package,register +// +k8s:defaulter-gen=TypeMeta +// +k8s:openapi-gen=true + +// +kubebuilder:validation:Optional +// +groupName=operator.openshift.io +package v1 diff --git a/vendor/github.com/openshift/api/operator/v1/register.go b/vendor/github.com/openshift/api/operator/v1/register.go new file mode 100644 index 0000000000000000000000000000000000000000..f5ceba65543c075e4d093504c9e1ab8b4d5052d2 --- /dev/null +++ b/vendor/github.com/openshift/api/operator/v1/register.go @@ -0,0 +1,70 @@ +package v1 + +import ( + configv1 "github.com/openshift/api/config/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" +) + +var ( + GroupName = "operator.openshift.io" + GroupVersion = schema.GroupVersion{Group: GroupName, Version: "v1"} + schemeBuilder = runtime.NewSchemeBuilder(addKnownTypes, configv1.Install) + // Install is a function which adds this version to a scheme + Install = schemeBuilder.AddToScheme + + // SchemeGroupVersion generated code relies on this name + // Deprecated + SchemeGroupVersion = GroupVersion + // AddToScheme exists solely to keep the old generators creating valid code + // DEPRECATED + AddToScheme = schemeBuilder.AddToScheme +) + +// 
Resource generated code relies on this being here, but it logically belongs to the group +// DEPRECATED +func Resource(resource string) schema.GroupResource { + return schema.GroupResource{Group: GroupName, Resource: resource} +} + +func addKnownTypes(scheme *runtime.Scheme) error { + metav1.AddToGroupVersion(scheme, GroupVersion) + + scheme.AddKnownTypes(GroupVersion, + &Authentication{}, + &AuthenticationList{}, + &DNS{}, + &DNSList{}, + &Console{}, + &ConsoleList{}, + &CSISnapshotController{}, + &CSISnapshotControllerList{}, + &Etcd{}, + &EtcdList{}, + &KubeAPIServer{}, + &KubeAPIServerList{}, + &KubeControllerManager{}, + &KubeControllerManagerList{}, + &KubeScheduler{}, + &KubeSchedulerList{}, + &KubeStorageVersionMigrator{}, + &KubeStorageVersionMigratorList{}, + &Network{}, + &NetworkList{}, + &OpenShiftAPIServer{}, + &OpenShiftAPIServerList{}, + &OpenShiftControllerManager{}, + &OpenShiftControllerManagerList{}, + &ServiceCA{}, + &ServiceCAList{}, + &ServiceCatalogAPIServer{}, + &ServiceCatalogAPIServerList{}, + &ServiceCatalogControllerManager{}, + &ServiceCatalogControllerManagerList{}, + &IngressController{}, + &IngressControllerList{}, + ) + + return nil +} diff --git a/vendor/github.com/openshift/api/operator/v1/types.go b/vendor/github.com/openshift/api/operator/v1/types.go new file mode 100644 index 0000000000000000000000000000000000000000..faf5a96c165cad3c6191dd27b6bbf9fb4203eb62 --- /dev/null +++ b/vendor/github.com/openshift/api/operator/v1/types.go @@ -0,0 +1,214 @@ +package v1 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" +) + +// MyOperatorResource is an example operator configuration type +type MyOperatorResource struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata"` + + // +kubebuilder:validation:Required + // +required + Spec MyOperatorResourceSpec `json:"spec"` + Status MyOperatorResourceStatus `json:"status"` +} + +type MyOperatorResourceSpec struct { + OperatorSpec `json:",inline"` +} + +type MyOperatorResourceStatus struct { + OperatorStatus `json:",inline"` +} + +// +kubebuilder:validation:Pattern=`^(Managed|Unmanaged|Force|Removed)$` +type ManagementState string + +var ( + // Force means that the operator is actively managing its resources but will not block an upgrade + // if unmet prereqs exist. This state puts the operator at risk for unsuccessful upgrades + Force ManagementState = "Force" + // Managed means that the operator is actively managing its resources and trying to keep the component active. + // It will only upgrade the component if it is safe to do so + Managed ManagementState = "Managed" + // Unmanaged means that the operator will not take any action related to the component + // Some operators might not support this management state as it might damage the cluster and lead to manual recovery. + Unmanaged ManagementState = "Unmanaged" + // Removed means that the operator is actively managing its resources and trying to remove all traces of the component + // Some operators (like kube-apiserver-operator) might not support this management state as removing the API server will + // brick the cluster. + Removed ManagementState = "Removed" +) + +// OperatorSpec contains common fields operators need. It is intended to be anonymous included +// inside of the Spec struct for your particular operator. 
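[editor's note] The register.go hunk above wires the operator.openshift.io/v1 types into a runtime.Scheme through the exported Install hook. A minimal sketch of a consumer using it follows; Install is the real function from this file, while the surrounding program (and the availability of the generated deepcopy code elsewhere in this vendored package) is assumed for illustration.

package main

import (
	"fmt"

	operatorv1 "github.com/openshift/api/operator/v1"
	"k8s.io/apimachinery/pkg/runtime"
)

func main() {
	scheme := runtime.NewScheme()
	// Install registers the operator.openshift.io/v1 types (plus the
	// config.openshift.io/v1 types they depend on) into the scheme.
	if err := operatorv1.Install(scheme); err != nil {
		panic(err)
	}
	// The scheme can now resolve the GroupVersionKind for these objects.
	gvks, _, err := scheme.ObjectKinds(&operatorv1.Console{})
	if err != nil {
		panic(err)
	}
	fmt.Println(gvks) // e.g. [operator.openshift.io/v1, Kind=Console]
}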
+type OperatorSpec struct { + // managementState indicates whether and how the operator should manage the component + ManagementState ManagementState `json:"managementState"` + + // logLevel is an intent based logging for an overall component. It does not give fine grained control, but it is a + // simple way to manage coarse grained logging choices that operators have to interpret for their operands. + // +optional + LogLevel LogLevel `json:"logLevel"` + + // operatorLogLevel is an intent based logging for the operator itself. It does not give fine grained control, but it is a + // simple way to manage coarse grained logging choices that operators have to interpret for themselves. + // +optional + OperatorLogLevel LogLevel `json:"operatorLogLevel"` + + // unsupportedConfigOverrides holds a sparse config that will override any previously set options. It only needs to be the fields to override + // it will end up overlaying in the following order: + // 1. hardcoded defaults + // 2. observedConfig + // 3. unsupportedConfigOverrides + // +optional + // +nullable + // +kubebuilder:pruning:PreserveUnknownFields + UnsupportedConfigOverrides runtime.RawExtension `json:"unsupportedConfigOverrides"` + + // observedConfig holds a sparse config that controller has observed from the cluster state. It exists in spec because + // it is an input to the level for the operator + // +optional + // +nullable + // +kubebuilder:pruning:PreserveUnknownFields + ObservedConfig runtime.RawExtension `json:"observedConfig"` +} + +type LogLevel string + +var ( + // Normal is the default. Normal, working log information, everything is fine, but helpful notices for auditing or common operations. In kube, this is probably glog=2. + Normal LogLevel = "Normal" + + // Debug is used when something went wrong. Even common operations may be logged, and less helpful but more quantity of notices. In kube, this is probably glog=4. + Debug LogLevel = "Debug" + + // Trace is used when something went really badly and even more verbose logs are needed. Logging every function call as part of a common operation, to tracing execution of a query. In kube, this is probably glog=6. + Trace LogLevel = "Trace" + + // TraceAll is used when something is broken at the level of API content/decoding. It will dump complete body content. If you turn this on in a production cluster + // prepare from serious performance issues and massive amounts of logs. In kube, this is probably glog=8. + TraceAll LogLevel = "TraceAll" +) + +type OperatorStatus struct { + // observedGeneration is the last generation change you've dealt with + // +optional + ObservedGeneration int64 `json:"observedGeneration,omitempty"` + + // conditions is a list of conditions and their status + // +optional + Conditions []OperatorCondition `json:"conditions,omitempty"` + + // version is the level this availability applies to + // +optional + Version string `json:"version,omitempty"` + + // readyReplicas indicates how many replicas are ready and at the desired state + ReadyReplicas int32 `json:"readyReplicas"` + + // generations are used to determine when an item needs to be reconciled or has changed in a way that needs a reaction. + // +optional + Generations []GenerationStatus `json:"generations,omitempty"` +} + +// GenerationStatus keeps track of the generation for a given resource so that decisions about forced updates can be made. 
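[editor's note] Because UnsupportedConfigOverrides and ObservedConfig are runtime.RawExtension fields, callers round-trip raw JSON through them rather than typed structs. A hedged sketch of populating the common OperatorSpec fields; the JSON keys in the override payload are invented purely for illustration.

package example

import (
	operatorv1 "github.com/openshift/api/operator/v1"
	"k8s.io/apimachinery/pkg/runtime"
)

// exampleSpec fills the shared OperatorSpec fields. The override blob is a
// sparse JSON document layered over hardcoded defaults and observedConfig.
var exampleSpec = operatorv1.OperatorSpec{
	ManagementState: operatorv1.Managed,
	LogLevel:        operatorv1.Normal,
	UnsupportedConfigOverrides: runtime.RawExtension{
		Raw: []byte(`{"exampleSection":{"exampleFlag":true}}`), // illustrative keys only
	},
}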
+type GenerationStatus struct { + // group is the group of the thing you're tracking + Group string `json:"group"` + // resource is the resource type of the thing you're tracking + Resource string `json:"resource"` + // namespace is where the thing you're tracking is + Namespace string `json:"namespace"` + // name is the name of the thing you're tracking + Name string `json:"name"` + // lastGeneration is the last generation of the workload controller involved + LastGeneration int64 `json:"lastGeneration"` + // hash is an optional field set for resources without generation that are content sensitive like secrets and configmaps + Hash string `json:"hash"` +} + +var ( + // Available indicates that the operand is present and accessible in the cluster + OperatorStatusTypeAvailable = "Available" + // Progressing indicates that the operator is trying to transition the operand to a different state + OperatorStatusTypeProgressing = "Progressing" + // Degraded indicates that the operator (not the operand) is unable to fulfill the user intent + OperatorStatusTypeDegraded = "Degraded" + // PrereqsSatisfied indicates that the things this operator depends on are present and at levels compatible with the + // current and desired states. + OperatorStatusTypePrereqsSatisfied = "PrereqsSatisfied" + // Upgradeable indicates that the operator configuration itself (not prereqs) can be auto-upgraded by the CVO + OperatorStatusTypeUpgradeable = "Upgradeable" +) + +// OperatorCondition is just the standard condition fields. +type OperatorCondition struct { + Type string `json:"type"` + Status ConditionStatus `json:"status"` + LastTransitionTime metav1.Time `json:"lastTransitionTime,omitempty"` + Reason string `json:"reason,omitempty"` + Message string `json:"message,omitempty"` +} + +type ConditionStatus string + +const ( + ConditionTrue ConditionStatus = "True" + ConditionFalse ConditionStatus = "False" + ConditionUnknown ConditionStatus = "Unknown" +) + +// StaticPodOperatorSpec is spec for controllers that manage static pods. +type StaticPodOperatorSpec struct { + OperatorSpec `json:",inline"` + + // forceRedeploymentReason can be used to force the redeployment of the operand by providing a unique string. + // This provides a mechanism to kick a previously failed deployment and provide a reason why you think it will work + // this time instead of failing again on the same config. + ForceRedeploymentReason string `json:"forceRedeploymentReason"` + + // failedRevisionLimit is the number of failed static pod installer revisions to keep on disk and in the api + // -1 = unlimited, 0 or unset = 5 (default) + FailedRevisionLimit int32 `json:"failedRevisionLimit,omitempty"` + // succeededRevisionLimit is the number of successful static pod installer revisions to keep on disk and in the api + // -1 = unlimited, 0 or unset = 5 (default) + SucceededRevisionLimit int32 `json:"succeededRevisionLimit,omitempty"` +} + +// StaticPodOperatorStatus is status for controllers that manage static pods. There are different needs because individual +// node status must be tracked. 
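[editor's note] OperatorCondition, defined above, carries no behavior of its own; consumers typically scan the Conditions slice for a condition of a given type. A hypothetical helper (findCondition and operandAvailable are not part of this package):

package example

import operatorv1 "github.com/openshift/api/operator/v1"

// findCondition returns the condition with the given type, or nil if absent.
func findCondition(conds []operatorv1.OperatorCondition, condType string) *operatorv1.OperatorCondition {
	for i := range conds {
		if conds[i].Type == condType {
			return &conds[i]
		}
	}
	return nil
}

// operandAvailable reports whether the Available condition is True.
func operandAvailable(status operatorv1.OperatorStatus) bool {
	c := findCondition(status.Conditions, operatorv1.OperatorStatusTypeAvailable)
	return c != nil && c.Status == operatorv1.ConditionTrue
}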
+type StaticPodOperatorStatus struct { + OperatorStatus `json:",inline"` + + // latestAvailableRevision is the deploymentID of the most recent deployment + // +optional + LatestAvailableRevision int32 `json:"latestAvailableRevision,omitEmpty"` + + // latestAvailableRevisionReason describe the detailed reason for the most recent deployment + // +optional + LatestAvailableRevisionReason string `json:"latestAvailableRevisionReason,omitEmpty"` + + // nodeStatuses track the deployment values and errors across individual nodes + // +optional + NodeStatuses []NodeStatus `json:"nodeStatuses,omitempty"` +} + +// NodeStatus provides information about the current state of a particular node managed by this operator. +type NodeStatus struct { + // nodeName is the name of the node + NodeName string `json:"nodeName"` + + // currentRevision is the generation of the most recently successful deployment + CurrentRevision int32 `json:"currentRevision"` + // targetRevision is the generation of the deployment we're trying to apply + TargetRevision int32 `json:"targetRevision,omitempty"` + // lastFailedRevision is the generation of the deployment we tried and failed to deploy. + LastFailedRevision int32 `json:"lastFailedRevision,omitempty"` + + // lastFailedRevisionErrors is a list of the errors during the failed deployment referenced in lastFailedRevision + LastFailedRevisionErrors []string `json:"lastFailedRevisionErrors,omitempty"` +} diff --git a/vendor/github.com/openshift/api/operator/v1/types_authentication.go b/vendor/github.com/openshift/api/operator/v1/types_authentication.go new file mode 100644 index 0000000000000000000000000000000000000000..403028dfd07e063ccd561aa6b48682ef49fb76fb --- /dev/null +++ b/vendor/github.com/openshift/api/operator/v1/types_authentication.go @@ -0,0 +1,43 @@ +package v1 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +// +genclient +// +genclient:nonNamespaced +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// Authentication provides information to configure an operator to manage authentication. +type Authentication struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty"` + + // +kubebuilder:validation:Required + // +required + Spec AuthenticationSpec `json:"spec,omitempty"` + // +optional + Status AuthenticationStatus `json:"status,omitempty"` +} + +type AuthenticationSpec struct { + OperatorSpec `json:",inline"` +} + +type AuthenticationStatus struct { + // ManagingOAuthAPIServer indicates whether this operator is managing OAuth related APIs. Setting this field to true will cause OAS-O to step down. 
+ // Note that this field will be removed in the future releases, once https://github.com/openshift/enhancements/blob/master/enhancements/authentication/separate-oauth-resources.md is fully implemented + // +optional + ManagingOAuthAPIServer bool `json:"managingOAuthAPIServer,omitempty"` + OperatorStatus `json:",inline"` +} + +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// AuthenticationList is a collection of items +type AuthenticationList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata"` + + Items []Authentication `json:"items"` +} diff --git a/vendor/github.com/openshift/api/operator/v1/types_console.go b/vendor/github.com/openshift/api/operator/v1/types_console.go new file mode 100644 index 0000000000000000000000000000000000000000..f766df48f0b30371c4adc66293d69f3f10e7c81b --- /dev/null +++ b/vendor/github.com/openshift/api/operator/v1/types_console.go @@ -0,0 +1,111 @@ +package v1 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + + configv1 "github.com/openshift/api/config/v1" +) + +// +genclient +// +genclient:nonNamespaced +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// Console provides a means to configure an operator to manage the console. +type Console struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty"` + + // +kubebuilder:validation:Required + // +required + Spec ConsoleSpec `json:"spec,omitempty"` + // +optional + Status ConsoleStatus `json:"status,omitempty"` +} + +// ConsoleSpec is the specification of the desired behavior of the Console. +type ConsoleSpec struct { + OperatorSpec `json:",inline"` + // customization is used to optionally provide a small set of + // customization options to the web console. + // +optional + Customization ConsoleCustomization `json:"customization"` + // providers contains configuration for using specific service providers. + Providers ConsoleProviders `json:"providers"` +} + +// ConsoleStatus defines the observed status of the Console. +type ConsoleStatus struct { + OperatorStatus `json:",inline"` +} + +// ConsoleProviders defines a list of optional additional providers of +// functionality to the console. +type ConsoleProviders struct { + // statuspage contains ID for statuspage.io page that provides status info about. + // +optional + Statuspage *StatuspageProvider `json:"statuspage,omitempty"` +} + +// StatuspageProvider provides identity for statuspage account. +type StatuspageProvider struct { + // pageID is the unique ID assigned by Statuspage for your page. This must be a public page. + PageID string `json:"pageID"` +} + +// ConsoleCustomization defines a list of optional configuration for the console UI. +type ConsoleCustomization struct { + // brand is the default branding of the web console which can be overridden by + // providing the brand field. There is a limited set of specific brand options. + // This field controls elements of the console such as the logo. + // Invalid value will prevent a console rollout. + Brand Brand `json:"brand,omitempty"` + // documentationBaseURL links to external documentation are shown in various sections + // of the web console. Providing documentationBaseURL will override the default + // documentation URL. + // Invalid value will prevent a console rollout. 
+ // +kubebuilder:validation:Pattern=`^$|^((https):\/\/?)[^\s()<>]+(?:\([\w\d]+\)|([^[:punct:]\s]|\/?))\/$` + DocumentationBaseURL string `json:"documentationBaseURL,omitempty"` + // customProductName is the name that will be displayed in page titles, logo alt text, and the about dialog + // instead of the normal OpenShift product name. + // +optional + CustomProductName string `json:"customProductName,omitempty"` + // customLogoFile replaces the default OpenShift logo in the masthead and about dialog. It is a reference to a + // ConfigMap in the openshift-config namespace. This can be created with a command like + // 'oc create configmap custom-logo --from-file=/path/to/file -n openshift-config'. + // Image size must be less than 1 MB due to constraints on the ConfigMap size. + // The ConfigMap key should include a file extension so that the console serves the file + // with the correct MIME type. + // Recommended logo specifications: + // Dimensions: Max height of 68px and max width of 200px + // SVG format preferred + // +optional + CustomLogoFile configv1.ConfigMapFileReference `json:"customLogoFile,omitempty"` +} + +// Brand is a specific supported brand within the console. +// +kubebuilder:validation:Pattern=`^$|^(ocp|origin|okd|dedicated|online|azure)$` +type Brand string + +const ( + // Branding for OpenShift + BrandOpenShift Brand = "openshift" + // Branding for The Origin Community Distribution of Kubernetes + BrandOKD Brand = "okd" + // Branding for OpenShift Online + BrandOnline Brand = "online" + // Branding for OpenShift Container Platform + BrandOCP Brand = "ocp" + // Branding for OpenShift Dedicated + BrandDedicated Brand = "dedicated" + // Branding for Azure Red Hat OpenShift + BrandAzure Brand = "azure" +) + +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +type ConsoleList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata"` + + Items []Console `json:"items"` +} diff --git a/vendor/github.com/openshift/api/operator/v1/types_csi_snapshot.go b/vendor/github.com/openshift/api/operator/v1/types_csi_snapshot.go new file mode 100644 index 0000000000000000000000000000000000000000..5b6c06aaff1e027c2030a79b81aeb1d36652dfcb --- /dev/null +++ b/vendor/github.com/openshift/api/operator/v1/types_csi_snapshot.go @@ -0,0 +1,44 @@ +package v1 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +// +genclient +// +genclient:nonNamespaced +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// CSISnapshotController provides a means to configure an operator to manage the CSI snapshots. `cluster` is the canonical name. +type CSISnapshotController struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty"` + + // spec holds user settable values for configuration + // +kubebuilder:validation:Required + // +required + Spec CSISnapshotControllerSpec `json:"spec"` + + // status holds observed values from the cluster. They may not be overridden. + // +optional + Status CSISnapshotControllerStatus `json:"status"` +} + +// CSISnapshotControllerSpec is the specification of the desired behavior of the CSISnapshotController operator. +type CSISnapshotControllerSpec struct { + OperatorSpec `json:",inline"` +} + +// CSISnapshotControllerStatus defines the observed status of the CSISnapshotController operator. 
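[editor's note] Tying the console CRD earlier in this diff to the Go types in types_console.go above, a sketch of a Console resource with customization set. The name "cluster" follows the singleton convention used by these operators; the documentation URL is an example value chosen to satisfy the CRD pattern (https scheme, trailing slash).

package example

import (
	operatorv1 "github.com/openshift/api/operator/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

// consoleExample sets the fields validated by the CRD patterns:
// Brand must match ^$|^(ocp|origin|okd|dedicated|online|azure)$.
var consoleExample = operatorv1.Console{
	ObjectMeta: metav1.ObjectMeta{Name: "cluster"},
	Spec: operatorv1.ConsoleSpec{
		OperatorSpec: operatorv1.OperatorSpec{ManagementState: operatorv1.Managed},
		Customization: operatorv1.ConsoleCustomization{
			Brand:                operatorv1.BrandOKD,
			DocumentationBaseURL: "https://docs.example.com/",
		},
	},
}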
+type CSISnapshotControllerStatus struct { + OperatorStatus `json:",inline"` +} + +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object +// +kubebuilder:object:root=true + +// CSISnapshotControllerList contains a list of CSISnapshotControllers. +type CSISnapshotControllerList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata,omitempty"` + Items []CSISnapshotController `json:"items"` +} diff --git a/vendor/github.com/openshift/api/operator/v1/types_dns.go b/vendor/github.com/openshift/api/operator/v1/types_dns.go new file mode 100644 index 0000000000000000000000000000000000000000..5bc361468527e743d40786c36e61b044a2644d69 --- /dev/null +++ b/vendor/github.com/openshift/api/operator/v1/types_dns.go @@ -0,0 +1,133 @@ +package v1 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +// +genclient +// +genclient:nonNamespaced +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object +// +kubebuilder:object:root=true +// +kubebuilder:resource:path=dnses,scope=Cluster +// +kubebuilder:subresource:status + +// DNS manages the CoreDNS component to provide a name resolution service +// for pods and services in the cluster. +// +// This supports the DNS-based service discovery specification: +// https://github.com/kubernetes/dns/blob/master/docs/specification.md +// +// More details: https://kubernetes.io/docs/tasks/administer-cluster/coredns +type DNS struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty"` + + // spec is the specification of the desired behavior of the DNS. + Spec DNSSpec `json:"spec,omitempty"` + // status is the most recently observed status of the DNS. + Status DNSStatus `json:"status,omitempty"` +} + +// DNSSpec is the specification of the desired behavior of the DNS. +type DNSSpec struct { + // servers is a list of DNS resolvers that provide name query delegation for one or + // more subdomains outside the scope of the cluster domain. If servers consists of + // more than one Server, longest suffix match will be used to determine the Server. + // + // For example, if there are two Servers, one for "foo.com" and another for "a.foo.com", + // and the name query is for "www.a.foo.com", it will be routed to the Server with Zone + // "a.foo.com". + // + // If this field is nil, no servers are created. + // + // +optional + Servers []Server `json:"servers,omitempty"` +} + +// Server defines the schema for a server that runs per instance of CoreDNS. +type Server struct { + // name is required and specifies a unique name for the server. Name must comply + // with the Service Name Syntax of rfc6335. + Name string `json:"name"` + // zones is required and specifies the subdomains that Server is authoritative for. + // Zones must conform to the rfc1123 definition of a subdomain. Specifying the + // cluster domain (i.e., "cluster.local") is invalid. + Zones []string `json:"zones"` + // forwardPlugin defines a schema for configuring CoreDNS to proxy DNS messages + // to upstream resolvers. + ForwardPlugin ForwardPlugin `json:"forwardPlugin"` +} + +// ForwardPlugin defines a schema for configuring the CoreDNS forward plugin. +type ForwardPlugin struct { + // upstreams is a list of resolvers to forward name queries for subdomains of Zones. + // Upstreams are randomized when more than 1 upstream is specified. Each instance of + // CoreDNS performs health checking of Upstreams. When a healthy upstream returns an + // error during the exchange, another resolver is tried from Upstreams. 
Each upstream + // is represented by an IP address or IP:port if the upstream listens on a port other + // than 53. + // + // A maximum of 15 upstreams is allowed per ForwardPlugin. + // + // +kubebuilder:validation:MaxItems=15 + Upstreams []string `json:"upstreams"` +} + +const ( + // Available indicates the DNS controller daemonset is available. + DNSAvailable = "Available" +) + +// DNSStatus defines the observed status of the DNS. +type DNSStatus struct { + // clusterIP is the service IP through which this DNS is made available. + // + // In the case of the default DNS, this will be a well known IP that is used + // as the default nameserver for pods that are using the default ClusterFirst DNS policy. + // + // In general, this IP can be specified in a pod's spec.dnsConfig.nameservers list + // or used explicitly when performing name resolution from within the cluster. + // Example: dig foo.com @<service IP> + // + // More info: https://kubernetes.io/docs/concepts/services-networking/service/#virtual-ips-and-service-proxies + // + // +kubebuilder:validation:Required + // +required + ClusterIP string `json:"clusterIP"` + + // clusterDomain is the local cluster DNS domain suffix for DNS services. + // This will be a subdomain as defined in RFC 1034, + // section 3.5: https://tools.ietf.org/html/rfc1034#section-3.5 + // Example: "cluster.local" + // + // More info: https://kubernetes.io/docs/concepts/services-networking/dns-pod-service + // + // +kubebuilder:validation:Required + // +required + ClusterDomain string `json:"clusterDomain"` + + // conditions provide information about the state of the DNS on the cluster. + // + // These are the supported DNS conditions: + // + // * Available + // - True if the following conditions are met: + // * DNS controller daemonset is available. + // - False if any of those conditions are unsatisfied. + // + // +patchMergeKey=type + // +patchStrategy=merge + // +optional + Conditions []OperatorCondition `json:"conditions,omitempty" patchStrategy:"merge" patchMergeKey:"type"` +} + +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object +// +kubebuilder:object:root=true + +// DNSList contains a list of DNS +type DNSList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata,omitempty"` + + Items []DNS `json:"items"` +} diff --git a/vendor/github.com/openshift/api/operator/v1/types_etcd.go b/vendor/github.com/openshift/api/operator/v1/types_etcd.go new file mode 100644 index 0000000000000000000000000000000000000000..6a2fbdb9acf9cddf7ea8d998a50828130125bf13 --- /dev/null +++ b/vendor/github.com/openshift/api/operator/v1/types_etcd.go @@ -0,0 +1,40 @@ +package v1 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +// +genclient +// +genclient:nonNamespaced +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// Etcd provides information to configure an operator to manage kube-apiserver. 
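[editor's note] Stepping back to types_dns.go above: a sketch of a DNSSpec using the Server and ForwardPlugin schemas. The zone name and resolver addresses are examples only.

package example

import operatorv1 "github.com/openshift/api/operator/v1"

// dnsSpecExample delegates name queries under "example.com" to two upstream
// resolvers. Upstreams are "IP" or "IP:port" strings (port 53 implied when
// omitted), with at most 15 per ForwardPlugin.
var dnsSpecExample = operatorv1.DNSSpec{
	Servers: []operatorv1.Server{{
		Name:  "example-resolver",
		Zones: []string{"example.com"},
		ForwardPlugin: operatorv1.ForwardPlugin{
			Upstreams: []string{"10.0.0.10", "10.0.0.11:5353"},
		},
	}},
}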
+type Etcd struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata"` + + // +kubebuilder:validation:Required + // +required + Spec EtcdSpec `json:"spec"` + // +optional + Status EtcdStatus `json:"status"` +} + +type EtcdSpec struct { + StaticPodOperatorSpec `json:",inline"` +} + +type EtcdStatus struct { + StaticPodOperatorStatus `json:",inline"` +} + +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// KubeAPISOperatorConfigList is a collection of items +type EtcdList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata"` + + // Items contains the items + Items []Etcd `json:"items"` +} diff --git a/vendor/github.com/openshift/api/operator/v1/types_ingress.go b/vendor/github.com/openshift/api/operator/v1/types_ingress.go new file mode 100644 index 0000000000000000000000000000000000000000..70b5f8389038934e5ce4c5e697c433db13db108a --- /dev/null +++ b/vendor/github.com/openshift/api/operator/v1/types_ingress.go @@ -0,0 +1,438 @@ +package v1 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + + corev1 "k8s.io/api/core/v1" + + configv1 "github.com/openshift/api/config/v1" +) + +// +genclient +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object +// +kubebuilder:object:root=true +// +kubebuilder:subresource:status +// +kubebuilder:subresource:scale:specpath=.spec.replicas,statuspath=.status.availableReplicas,selectorpath=.status.selector + +// IngressController describes a managed ingress controller for the cluster. The +// controller can service OpenShift Route and Kubernetes Ingress resources. +// +// When an IngressController is created, a new ingress controller deployment is +// created to allow external traffic to reach the services that expose Ingress +// or Route resources. Updating this resource may lead to disruption for public +// facing network connections as a new ingress controller revision may be rolled +// out. +// +// https://kubernetes.io/docs/concepts/services-networking/ingress-controllers +// +// Whenever possible, sensible defaults for the platform are used. See each +// field for more details. +type IngressController struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty"` + + // spec is the specification of the desired behavior of the IngressController. + Spec IngressControllerSpec `json:"spec,omitempty"` + // status is the most recently observed status of the IngressController. + Status IngressControllerStatus `json:"status,omitempty"` +} + +// IngressControllerSpec is the specification of the desired behavior of the +// IngressController. +type IngressControllerSpec struct { + // domain is a DNS name serviced by the ingress controller and is used to + // configure multiple features: + // + // * For the LoadBalancerService endpoint publishing strategy, domain is + // used to configure DNS records. See endpointPublishingStrategy. + // + // * When using a generated default certificate, the certificate will be valid + // for domain and its subdomains. See defaultCertificate. + // + // * The value is published to individual Route statuses so that end-users + // know where to target external DNS records. + // + // domain must be unique among all IngressControllers, and cannot be + // updated. + // + // If empty, defaults to ingress.config.openshift.io/cluster .spec.domain. + // + // +optional + Domain string `json:"domain,omitempty"` + + // replicas is the desired number of ingress controller replicas. If unset, + // defaults to 2. 
+ // + // +optional + Replicas *int32 `json:"replicas,omitempty"` + + // endpointPublishingStrategy is used to publish the ingress controller + // endpoints to other networks, enable load balancer integrations, etc. + // + // If unset, the default is based on + // infrastructure.config.openshift.io/cluster .status.platform: + // + // AWS: LoadBalancerService (with External scope) + // Azure: LoadBalancerService (with External scope) + // GCP: LoadBalancerService (with External scope) + // IBMCloud: LoadBalancerService (with External scope) + // Libvirt: HostNetwork + // + // Any other platform types (including None) default to HostNetwork. + // + // endpointPublishingStrategy cannot be updated. + // + // +optional + EndpointPublishingStrategy *EndpointPublishingStrategy `json:"endpointPublishingStrategy,omitempty"` + + // defaultCertificate is a reference to a secret containing the default + // certificate served by the ingress controller. When Routes don't specify + // their own certificate, defaultCertificate is used. + // + // The secret must contain the following keys and data: + // + // tls.crt: certificate file contents + // tls.key: key file contents + // + // If unset, a wildcard certificate is automatically generated and used. The + // certificate is valid for the ingress controller domain (and subdomains) and + // the generated certificate's CA will be automatically integrated with the + // cluster's trust store. + // + // The in-use certificate (whether generated or user-specified) will be + // automatically integrated with OpenShift's built-in OAuth server. + // + // +optional + DefaultCertificate *corev1.LocalObjectReference `json:"defaultCertificate,omitempty"` + + // namespaceSelector is used to filter the set of namespaces serviced by the + // ingress controller. This is useful for implementing shards. + // + // If unset, the default is no filtering. + // + // +optional + NamespaceSelector *metav1.LabelSelector `json:"namespaceSelector,omitempty"` + + // routeSelector is used to filter the set of Routes serviced by the ingress + // controller. This is useful for implementing shards. + // + // If unset, the default is no filtering. + // + // +optional + RouteSelector *metav1.LabelSelector `json:"routeSelector,omitempty"` + + // nodePlacement enables explicit control over the scheduling of the ingress + // controller. + // + // If unset, defaults are used. See NodePlacement for more details. + // + // +optional + NodePlacement *NodePlacement `json:"nodePlacement,omitempty"` + + // tlsSecurityProfile specifies settings for TLS connections for ingresscontrollers. + // + // If unset, the default is based on the apiservers.config.openshift.io/cluster resource. + // + // Note that when using the Old, Intermediate, and Modern profile types, the effective + // profile configuration is subject to change between releases. For example, given + // a specification to use the Intermediate profile deployed on release X.Y.Z, an upgrade + // to release X.Y.Z+1 may cause a new profile configuration to be applied to the ingress + // controller, resulting in a rollout. + // + // Note that the minimum TLS version for ingress controllers is 1.1, and + // the maximum TLS version is 1.2. An implication of this restriction + // is that the Modern TLS profile type cannot be used because it + // requires TLS 1.3. 
+ // + // +optional + TLSSecurityProfile *configv1.TLSSecurityProfile `json:"tlsSecurityProfile,omitempty"` + + // routeAdmission defines a policy for handling new route claims (for example, + // to allow or deny claims across namespaces). + // + // If empty, defaults will be applied. See specific routeAdmission fields + // for details about their defaults. + // + // +optional + RouteAdmission *RouteAdmissionPolicy `json:"routeAdmission,omitempty"` +} + +// NodePlacement describes node scheduling configuration for an ingress +// controller. +type NodePlacement struct { + // nodeSelector is the node selector applied to ingress controller + // deployments. + // + // If unset, the default is: + // + // beta.kubernetes.io/os: linux + // node-role.kubernetes.io/worker: '' + // + // If set, the specified selector is used and replaces the default. + // + // +optional + NodeSelector *metav1.LabelSelector `json:"nodeSelector,omitempty"` + + // tolerations is a list of tolerations applied to ingress controller + // deployments. + // + // The default is an empty list. + // + // See https://kubernetes.io/docs/concepts/configuration/taint-and-toleration/ + // + // +optional + Tolerations []corev1.Toleration `json:"tolerations,omitempty"` +} + +// EndpointPublishingStrategyType is a way to publish ingress controller endpoints. +// +kubebuilder:validation:Enum=LoadBalancerService;HostNetwork;Private;NodePortService +type EndpointPublishingStrategyType string + +const ( + // LoadBalancerService publishes the ingress controller using a Kubernetes + // LoadBalancer Service. + LoadBalancerServiceStrategyType EndpointPublishingStrategyType = "LoadBalancerService" + + // HostNetwork publishes the ingress controller on node ports where the + // ingress controller is deployed. + HostNetworkStrategyType EndpointPublishingStrategyType = "HostNetwork" + + // Private does not publish the ingress controller. + PrivateStrategyType EndpointPublishingStrategyType = "Private" + + // NodePortService publishes the ingress controller using a Kubernetes NodePort Service. + NodePortServiceStrategyType EndpointPublishingStrategyType = "NodePortService" +) + +// LoadBalancerScope is the scope at which a load balancer is exposed. +// +kubebuilder:validation:Enum=Internal;External +type LoadBalancerScope string + +var ( + // InternalLoadBalancer is a load balancer that is exposed only on the + // cluster's private network. + InternalLoadBalancer LoadBalancerScope = "Internal" + + // ExternalLoadBalancer is a load balancer that is exposed on the + // cluster's public network (which is typically on the Internet). + ExternalLoadBalancer LoadBalancerScope = "External" +) + +// LoadBalancerStrategy holds parameters for a load balancer. +type LoadBalancerStrategy struct { + // scope indicates the scope at which the load balancer is exposed. + // Possible values are "External" and "Internal". + // + // +kubebuilder:validation:Required + // +required + Scope LoadBalancerScope `json:"scope"` +} + +// HostNetworkStrategy holds parameters for the HostNetwork endpoint publishing +// strategy. +type HostNetworkStrategy struct { +} + +// PrivateStrategy holds parameters for the Private endpoint publishing +// strategy. +type PrivateStrategy struct { +} + +// NodePortStrategy holds parameters for the NodePortService endpoint publishing strategy. 
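[editor's note] Returning to NodePlacement above: a sketch pinning an ingress controller to dedicated nodes. The infra label and taint names are illustrative conventions, not defaults defined by this API.

package example

import (
	operatorv1 "github.com/openshift/api/operator/v1"
	corev1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

// placementExample replaces the default node selector and tolerates a
// hypothetical infra taint so the deployment can schedule onto those nodes.
var placementExample = operatorv1.NodePlacement{
	NodeSelector: &metav1.LabelSelector{
		MatchLabels: map[string]string{"node-role.kubernetes.io/infra": ""},
	},
	Tolerations: []corev1.Toleration{{
		Key:      "node-role.kubernetes.io/infra",
		Operator: corev1.TolerationOpExists,
		Effect:   corev1.TaintEffectNoSchedule,
	}},
}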
+type NodePortStrategy struct { +} + +// EndpointPublishingStrategy is a way to publish the endpoints of an +// IngressController, and represents the type and any additional configuration +// for a specific type. +// +union +type EndpointPublishingStrategy struct { + // type is the publishing strategy to use. Valid values are: + // + // * LoadBalancerService + // + // Publishes the ingress controller using a Kubernetes LoadBalancer Service. + // + // In this configuration, the ingress controller deployment uses container + // networking. A LoadBalancer Service is created to publish the deployment. + // + // See: https://kubernetes.io/docs/concepts/services-networking/service/#loadbalancer + // + // If domain is set, a wildcard DNS record will be managed to point at the + // LoadBalancer Service's external name. DNS records are managed only in DNS + // zones defined by dns.config.openshift.io/cluster .spec.publicZone and + // .spec.privateZone. + // + // Wildcard DNS management is currently supported only on the AWS, Azure, + // and GCP platforms. + // + // * HostNetwork + // + // Publishes the ingress controller on node ports where the ingress controller + // is deployed. + // + // In this configuration, the ingress controller deployment uses host + // networking, bound to node ports 80 and 443. The user is responsible for + // configuring an external load balancer to publish the ingress controller via + // the node ports. + // + // * Private + // + // Does not publish the ingress controller. + // + // In this configuration, the ingress controller deployment uses container + // networking, and is not explicitly published. The user must manually publish + // the ingress controller. + // + // * NodePortService + // + // Publishes the ingress controller using a Kubernetes NodePort Service. + // + // In this configuration, the ingress controller deployment uses container + // networking. A NodePort Service is created to publish the deployment. The + // specific node ports are dynamically allocated by OpenShift; however, to + // support static port allocations, user changes to the node port + // field of the managed NodePort Service will preserved. + // + // +unionDiscriminator + // +kubebuilder:validation:Required + // +required + Type EndpointPublishingStrategyType `json:"type"` + + // loadBalancer holds parameters for the load balancer. Present only if + // type is LoadBalancerService. + // +optional + LoadBalancer *LoadBalancerStrategy `json:"loadBalancer,omitempty"` + + // hostNetwork holds parameters for the HostNetwork endpoint publishing + // strategy. Present only if type is HostNetwork. + // +optional + HostNetwork *HostNetworkStrategy `json:"hostNetwork,omitempty"` + + // private holds parameters for the Private endpoint publishing + // strategy. Present only if type is Private. + // +optional + Private *PrivateStrategy `json:"private,omitempty"` + + // nodePort holds parameters for the NodePortService endpoint publishing strategy. + // Present only if type is NodePortService. + // +optional + NodePort *NodePortStrategy `json:"nodePort,omitempty"` +} + +// RouteAdmissionPolicy is an admission policy for allowing new route claims. +type RouteAdmissionPolicy struct { + // namespaceOwnership describes how host name claims across namespaces should + // be handled. + // + // Value must be one of: + // + // - Strict: Do not allow routes in different namespaces to claim the same host. 
+ // + // - InterNamespaceAllowed: Allow routes to claim different paths of the same + // host name across namespaces. + // + // If empty, the default is Strict. + // +optional + NamespaceOwnership NamespaceOwnershipCheck `json:"namespaceOwnership,omitempty"` +} + +// NamespaceOwnershipCheck is a route admission policy component that describes +// how host name claims across namespaces should be handled. +// +kubebuilder:validation:Enum=InterNamespaceAllowed;Strict +type NamespaceOwnershipCheck string + +const ( + // InterNamespaceAllowedOwnershipCheck allows routes to claim different paths of the same host name across namespaces. + InterNamespaceAllowedOwnershipCheck NamespaceOwnershipCheck = "InterNamespaceAllowed" + + // StrictNamespaceOwnershipCheck does not allow routes to claim the same host name across namespaces. + StrictNamespaceOwnershipCheck NamespaceOwnershipCheck = "Strict" +) + +var ( + // Available indicates the ingress controller deployment is available. + IngressControllerAvailableConditionType = "Available" + // LoadBalancerManaged indicates the management status of any load balancer + // service associated with an ingress controller. + LoadBalancerManagedIngressConditionType = "LoadBalancerManaged" + // LoadBalancerReady indicates the ready state of any load balancer service + // associated with an ingress controller. + LoadBalancerReadyIngressConditionType = "LoadBalancerReady" + // DNSManaged indicates the management status of any DNS records for the + // ingress controller. + DNSManagedIngressConditionType = "DNSManaged" + // DNSReady indicates the ready state of any DNS records for the ingress + // controller. + DNSReadyIngressConditionType = "DNSReady" +) + +// IngressControllerStatus defines the observed status of the IngressController. +type IngressControllerStatus struct { + // availableReplicas is number of observed available replicas according to the + // ingress controller deployment. + AvailableReplicas int32 `json:"availableReplicas"` + + // selector is a label selector, in string format, for ingress controller pods + // corresponding to the IngressController. The number of matching pods should + // equal the value of availableReplicas. + Selector string `json:"selector"` + + // domain is the actual domain in use. + Domain string `json:"domain"` + + // endpointPublishingStrategy is the actual strategy in use. + EndpointPublishingStrategy *EndpointPublishingStrategy `json:"endpointPublishingStrategy,omitempty"` + + // conditions is a list of conditions and their status. + // + // Available means the ingress controller deployment is available and + // servicing route and ingress resources (i.e, .status.availableReplicas + // equals .spec.replicas) + // + // There are additional conditions which indicate the status of other + // ingress controller features and capabilities. + // + // * LoadBalancerManaged + // - True if the following conditions are met: + // * The endpoint publishing strategy requires a service load balancer. + // - False if any of those conditions are unsatisfied. + // + // * LoadBalancerReady + // - True if the following conditions are met: + // * A load balancer is managed. + // * The load balancer is ready. + // - False if any of those conditions are unsatisfied. + // + // * DNSManaged + // - True if the following conditions are met: + // * The endpoint publishing strategy and platform support DNS. + // * The ingress controller domain is set. + // * dns.config.openshift.io/cluster configures DNS zones. 
+ // - False if any of those conditions are unsatisfied. + // + // * DNSReady + // - True if the following conditions are met: + // * DNS is managed. + // * DNS records have been successfully created. + // - False if any of those conditions are unsatisfied. + Conditions []OperatorCondition `json:"conditions,omitempty"` + + // tlsProfile is the TLS connection configuration that is in effect. + // +optional + TLSProfile *configv1.TLSProfileSpec `json:"tlsProfile,omitempty"` + + // observedGeneration is the most recent generation observed. + // +optional + ObservedGeneration int64 `json:"observedGeneration,omitempty"` +} + +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object +// +kubebuilder:object:root=true + +// IngressControllerList contains a list of IngressControllers. +type IngressControllerList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata,omitempty"` + Items []IngressController `json:"items"` +} diff --git a/vendor/github.com/openshift/api/operator/v1/types_kubeapiserver.go b/vendor/github.com/openshift/api/operator/v1/types_kubeapiserver.go new file mode 100644 index 0000000000000000000000000000000000000000..cd657c55424b8e6db218876b2a5a973842dc90e1 --- /dev/null +++ b/vendor/github.com/openshift/api/operator/v1/types_kubeapiserver.go @@ -0,0 +1,43 @@ +package v1 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +// +genclient +// +genclient:nonNamespaced +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// KubeAPIServer provides information to configure an operator to manage kube-apiserver. +type KubeAPIServer struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata"` + + // spec is the specification of the desired behavior of the Kubernetes API Server + // +kubebuilder:validation:Required + // +required + Spec KubeAPIServerSpec `json:"spec"` + + // status is the most recently observed status of the Kubernetes API Server + // +optional + Status KubeAPIServerStatus `json:"status"` +} + +type KubeAPIServerSpec struct { + StaticPodOperatorSpec `json:",inline"` +} + +type KubeAPIServerStatus struct { + StaticPodOperatorStatus `json:",inline"` +} + +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// KubeAPIServerList is a collection of items +type KubeAPIServerList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata"` + + // Items contains the items + Items []KubeAPIServer `json:"items"` +} diff --git a/vendor/github.com/openshift/api/operator/v1/types_kubecontrollermanager.go b/vendor/github.com/openshift/api/operator/v1/types_kubecontrollermanager.go new file mode 100644 index 0000000000000000000000000000000000000000..c20ae30ccd7cb683aca3698cdd6791d74327f765 --- /dev/null +++ b/vendor/github.com/openshift/api/operator/v1/types_kubecontrollermanager.go @@ -0,0 +1,43 @@ +package v1 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +// +genclient +// +genclient:nonNamespaced +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// KubeControllerManager provides information to configure an operator to manage kube-controller-manager. 
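[editor's note] The EndpointPublishingStrategy defined earlier in types_ingress.go is a tagged union: Type is the discriminator, and only the member matching Type should be set. A sketch selecting a load balancer scoped to the private network:

package example

import operatorv1 "github.com/openshift/api/operator/v1"

// strategyExample publishes the ingress controller via a Kubernetes
// LoadBalancer Service on the cluster's private network; the other union
// members (HostNetwork, Private, NodePort) stay nil.
var strategyExample = operatorv1.EndpointPublishingStrategy{
	Type: operatorv1.LoadBalancerServiceStrategyType,
	LoadBalancer: &operatorv1.LoadBalancerStrategy{
		Scope: operatorv1.InternalLoadBalancer,
	},
}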
+type KubeControllerManager struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata"` + + // spec is the specification of the desired behavior of the Kubernetes Controller Manager + // +kubebuilder:validation:Required + // +required + Spec KubeControllerManagerSpec `json:"spec"` + + // status is the most recently observed status of the Kubernetes Controller Manager + // +optional + Status KubeControllerManagerStatus `json:"status"` +} + +type KubeControllerManagerSpec struct { + StaticPodOperatorSpec `json:",inline"` +} + +type KubeControllerManagerStatus struct { + StaticPodOperatorStatus `json:",inline"` +} + +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// KubeControllerManagerList is a collection of items +type KubeControllerManagerList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata"` + + // Items contains the items + Items []KubeControllerManager `json:"items"` +} diff --git a/vendor/github.com/openshift/api/operator/v1/types_kubestorageversionmigrator.go b/vendor/github.com/openshift/api/operator/v1/types_kubestorageversionmigrator.go new file mode 100644 index 0000000000000000000000000000000000000000..5949ac021a770e92e19cd34c956b25a081b7a5a3 --- /dev/null +++ b/vendor/github.com/openshift/api/operator/v1/types_kubestorageversionmigrator.go @@ -0,0 +1,40 @@ +package v1 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +// +genclient +// +genclient:nonNamespaced +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// KubeStorageVersionMigrator provides information to configure an operator to manage kube-storage-version-migrator. +type KubeStorageVersionMigrator struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata"` + + // +kubebuilder:validation:Required + // +required + Spec KubeStorageVersionMigratorSpec `json:"spec"` + // +optional + Status KubeStorageVersionMigratorStatus `json:"status"` +} + +type KubeStorageVersionMigratorSpec struct { + OperatorSpec `json:",inline"` +} + +type KubeStorageVersionMigratorStatus struct { + OperatorStatus `json:",inline"` +} + +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// KubeStorageVersionMigratorList is a collection of items +type KubeStorageVersionMigratorList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata"` + + // Items contains the items + Items []KubeStorageVersionMigrator `json:"items"` +} diff --git a/vendor/github.com/openshift/api/operator/v1/types_network.go b/vendor/github.com/openshift/api/operator/v1/types_network.go new file mode 100644 index 0000000000000000000000000000000000000000..92f78b5cddb70924f6ab16c89c5d0d907fafad0e --- /dev/null +++ b/vendor/github.com/openshift/api/operator/v1/types_network.go @@ -0,0 +1,404 @@ +package v1 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +// +genclient +// +genclient:nonNamespaced +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// Network describes the cluster's desired network configuration. It is +// consumed by the cluster-network-operator. +// +k8s:openapi-gen=true +type Network struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty"` + + Spec NetworkSpec `json:"spec,omitempty"` + Status NetworkStatus `json:"status,omitempty"` +} + +// NetworkStatus is currently unused. Instead, status +// is reported in the Network.config.openshift.io object. 
+type NetworkStatus struct {
+}
+
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+
+// NetworkList contains a list of Network configurations
+type NetworkList struct {
+ metav1.TypeMeta `json:",inline"`
+ metav1.ListMeta `json:"metadata,omitempty"`
+ Items []Network `json:"items"`
+}
+
+// NetworkSpec is the top-level network configuration object.
+type NetworkSpec struct {
+ // clusterNetwork is the IP address pool to use for pod IPs.
+ // Some network providers, e.g. OpenShift SDN, support multiple ClusterNetworks.
+ // Others only support one. This is equivalent to the cluster-cidr.
+ ClusterNetwork []ClusterNetworkEntry `json:"clusterNetwork"`
+
+ // serviceNetwork is the IP address pool to use for Service IPs.
+ // Currently, all existing network providers only support a single value
+ // here, but this is an array to allow for growth.
+ ServiceNetwork []string `json:"serviceNetwork"`
+
+ // defaultNetwork is the "default" network that all pods will receive
+ DefaultNetwork DefaultNetworkDefinition `json:"defaultNetwork"`
+
+ // additionalNetworks is a list of extra networks to make available to pods
+ // when multiple networks are enabled.
+ AdditionalNetworks []AdditionalNetworkDefinition `json:"additionalNetworks,omitempty"`
+
+ // disableMultiNetwork specifies whether or not multiple pod network
+ // support should be disabled. If unset, this property defaults to
+ // 'false' and multiple network support is enabled.
+ DisableMultiNetwork *bool `json:"disableMultiNetwork,omitempty"`
+
+ // deployKubeProxy specifies whether or not a standalone kube-proxy should
+ // be deployed by the operator. Some network providers include kube-proxy
+ // or similar functionality. If unset, the plugin will attempt to select
+ // the correct value, which is false when OpenShift SDN and ovn-kubernetes are
+ // used and true otherwise.
+ // +optional
+ DeployKubeProxy *bool `json:"deployKubeProxy,omitempty"`
+
+ // kubeProxyConfig lets us configure the desired proxy configuration.
+ // If not specified, sensible defaults will be chosen by OpenShift directly.
+ // Not consumed by all network providers - currently only openshift-sdn.
+ KubeProxyConfig *ProxyConfig `json:"kubeProxyConfig,omitempty"`
+
+ // logLevel allows configuring the logging level of the components deployed
+ // by the operator. Currently only Kuryr SDN is affected by this setting.
+ // Please note that turning on extensive logging may affect performance.
+ // The default value is "Normal".
+ // +optional
+ LogLevel LogLevel `json:"logLevel"`
+}
+
+// ClusterNetworkEntry is a subnet from which to allocate PodIPs. A network of size
+// HostPrefix (in CIDR notation) will be allocated when nodes join the cluster.
+// Not all network providers support multiple ClusterNetworks
+type ClusterNetworkEntry struct {
+ CIDR string `json:"cidr"`
+ // +kubebuilder:validation:Minimum=0
+ HostPrefix uint32 `json:"hostPrefix"`
+}
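Taken together, the spec fields above compose as in the sketch below, assuming the vendored import path from this diff; DefaultNetworkDefinition and the SDN constants are defined further down in this file, and "cluster" is the conventional singleton object name consumed by the cluster-network-operator:

package main

import (
	"fmt"

	operatorv1 "github.com/openshift/api/operator/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

func main() {
	net := operatorv1.Network{
		ObjectMeta: metav1.ObjectMeta{Name: "cluster"},
		Spec: operatorv1.NetworkSpec{
			// Pod IPs: each joining node is handed a /23 out of this /14.
			ClusterNetwork: []operatorv1.ClusterNetworkEntry{
				{CIDR: "10.128.0.0/14", HostPrefix: 23},
			},
			// Service IPs: a single entry today; an array to allow for growth.
			ServiceNetwork: []string{"172.30.0.0/16"},
			DefaultNetwork: operatorv1.DefaultNetworkDefinition{
				Type: operatorv1.NetworkTypeOpenShiftSDN,
				OpenShiftSDNConfig: &operatorv1.OpenShiftSDNConfig{
					Mode: operatorv1.SDNModeNetworkPolicy,
				},
			},
		},
	}
	fmt.Println(net.Spec.DefaultNetwork.Type) // OpenShiftSDN
}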
+
+// DefaultNetworkDefinition represents a single network plugin's configuration.
+// type must be specified, along with exactly one "Config" that matches the type.
+type DefaultNetworkDefinition struct {
+ // type is the type of network
+ // All NetworkTypes are supported except for NetworkTypeRaw
+ Type NetworkType `json:"type"`
+
+ // openShiftSDNConfig configures the openshift-sdn plugin
+ // +optional
+ OpenShiftSDNConfig *OpenShiftSDNConfig `json:"openshiftSDNConfig,omitempty"`
+
+ // oVNKubernetesConfig configures the ovn-kubernetes plugin. This is currently
+ // not implemented.
+ // +optional
+ OVNKubernetesConfig *OVNKubernetesConfig `json:"ovnKubernetesConfig,omitempty"`
+
+ // KuryrConfig configures the kuryr plugin
+ // +optional
+ KuryrConfig *KuryrConfig `json:"kuryrConfig,omitempty"`
+}
+
+// SimpleMacvlanConfig contains configurations for the macvlan interface.
+type SimpleMacvlanConfig struct {
+ // master is the host interface to create the macvlan interface from.
+ // If not specified, it defaults to the default route interface.
+ // +optional
+ Master string `json:"master,omitempty"`
+
+ // IPAMConfig configures the IPAM module that will be used for IP Address Management (IPAM).
+ // +optional
+ IPAMConfig *IPAMConfig `json:"ipamConfig,omitempty"`
+
+ // mode is the macvlan mode: bridge, private, vepa, passthru. The default is bridge
+ // +optional
+ Mode MacvlanMode `json:"mode,omitempty"`
+
+ // mtu is the mtu to use for the macvlan interface. If unset, the host's
+ // kernel will select the value.
+ // +kubebuilder:validation:Minimum=0
+ // +optional
+ MTU uint32 `json:"mtu,omitempty"`
+}
+
+// StaticIPAMAddresses provides IP address and Gateway for static IPAM addresses
+type StaticIPAMAddresses struct {
+ // Address is the IP address in CIDR format
+ // +optional
+ Address string `json:"address"`
+ // Gateway is an IP inside of the subnet to designate as the gateway
+ // +optional
+ Gateway string `json:"gateway,omitempty"`
+}
+
+// StaticIPAMRoutes provides Destination/Gateway pairs for static IPAM routes
+type StaticIPAMRoutes struct {
+ // Destination is the IP route destination
+ Destination string `json:"destination"`
+ // Gateway is the route's next-hop IP address.
+ // If unset, a default gateway is assumed (as determined by the CNI plugin).
+ // +optional
+ Gateway string `json:"gateway,omitempty"`
+}
+
+// StaticIPAMDNS provides DNS-related information for static IPAM
+type StaticIPAMDNS struct {
+ // Nameservers lists the DNS servers to use for IP lookup
+ // +optional
+ Nameservers []string `json:"nameservers,omitempty"`
+ // Domain configures the local domain name used for short hostname lookups
+ // +optional
+ Domain string `json:"domain,omitempty"`
+ // Search configures priority-ordered search domains for short hostname lookups
+ // +optional
+ Search []string `json:"search,omitempty"`
+}
+
+// StaticIPAMConfig contains configurations for static IPAM (IP Address Management)
+type StaticIPAMConfig struct {
+ // Addresses configures the IP addresses for the interface
+ // +optional
+ Addresses []StaticIPAMAddresses `json:"addresses,omitempty"`
+ // Routes configures IP routes for the interface
+ // +optional
+ Routes []StaticIPAMRoutes `json:"routes,omitempty"`
+ // DNS configures DNS for the interface
+ // +optional
+ DNS *StaticIPAMDNS `json:"dns,omitempty"`
+}
+
+// IPAMConfig contains configurations for IPAM (IP Address Management)
+type IPAMConfig struct {
+ // Type is the type of IPAM module that will be used for IP Address Management (IPAM).
+ // The supported values are IPAMTypeDHCP, IPAMTypeStatic
+ Type IPAMType `json:"type"`
+
+ // StaticIPAMConfig configures the static IP address in case of type:IPAMTypeStatic
+ // +optional
+ StaticIPAMConfig *StaticIPAMConfig `json:"staticIPAMConfig,omitempty"`
+}
+
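The static IPAM types above feed into AdditionalNetworkDefinition, defined next. A sketch of a secondary macvlan attachment with a static address, assuming the vendored import path from this diff; the network name, namespace, and host interface are illustrative only:

package main

import (
	"fmt"

	operatorv1 "github.com/openshift/api/operator/v1"
)

func main() {
	extra := operatorv1.AdditionalNetworkDefinition{
		Type:      operatorv1.NetworkTypeSimpleMacvlan,
		Name:      "macvlan-net", // hypothetical; must be unique
		Namespace: "demo",        // hypothetical
		SimpleMacvlanConfig: &operatorv1.SimpleMacvlanConfig{
			Master: "eth1", // hypothetical host interface
			Mode:   operatorv1.MacvlanModeBridge,
			IPAMConfig: &operatorv1.IPAMConfig{
				Type: operatorv1.IPAMTypeStatic,
				StaticIPAMConfig: &operatorv1.StaticIPAMConfig{
					Addresses: []operatorv1.StaticIPAMAddresses{
						{Address: "192.168.1.10/24", Gateway: "192.168.1.1"},
					},
				},
			},
		},
	}
	fmt.Println(extra.Name, extra.Type) // macvlan-net SimpleMacvlan
}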
+// AdditionalNetworkDefinition configures an extra network that is available but not
+// created by default. Instead, pods must request it by name.
+// type must be specified, along with exactly one "Config" that matches the type.
+type AdditionalNetworkDefinition struct {
+ // type is the type of network
+ // The supported values are NetworkTypeRaw, NetworkTypeSimpleMacvlan
+ Type NetworkType `json:"type"`
+
+ // name is the name of the network. This will be populated in the resulting CRD.
+ // This must be unique.
+ Name string `json:"name"`
+
+ // namespace is the namespace of the network. This will be populated in the resulting CRD.
+ // If not given, the network will be created in the default namespace.
+ Namespace string `json:"namespace,omitempty"`
+
+ // rawCNIConfig is the raw CNI configuration JSON to create in the
+ // NetworkAttachmentDefinition CRD
+ RawCNIConfig string `json:"rawCNIConfig,omitempty"`
+
+ // SimpleMacvlanConfig configures the macvlan interface in case of type:NetworkTypeSimpleMacvlan
+ // +optional
+ SimpleMacvlanConfig *SimpleMacvlanConfig `json:"simpleMacvlanConfig,omitempty"`
+}
+
+// OpenShiftSDNConfig configures the three openshift-sdn plugins
+type OpenShiftSDNConfig struct {
+ // mode is one of "Multitenant", "Subnet", or "NetworkPolicy"
+ Mode SDNMode `json:"mode"`
+
+ // vxlanPort is the port to use for all vxlan packets. The default is 4789.
+ // +kubebuilder:validation:Minimum=0
+ // +optional
+ VXLANPort *uint32 `json:"vxlanPort,omitempty"`
+
+ // mtu is the mtu to use for the tunnel interface. Defaults to 1450 if unset.
+ // This must be 50 bytes smaller than the machine's uplink.
+ // +kubebuilder:validation:Minimum=0
+ // +optional
+ MTU *uint32 `json:"mtu,omitempty"`
+
+ // useExternalOpenvswitch tells the operator not to install openvswitch, because
+ // it will be provided separately. If set, you must provide it yourself.
+ // +optional
+ UseExternalOpenvswitch *bool `json:"useExternalOpenvswitch,omitempty"`
+
+ // enableUnidling controls whether or not the service proxy will support idling
+ // and unidling of services. By default, unidling is enabled.
+ EnableUnidling *bool `json:"enableUnidling,omitempty"`
+}
+
+// KuryrConfig configures the Kuryr-Kubernetes SDN
+type KuryrConfig struct {
+ // The port on which kuryr-daemon will listen for readiness and liveness requests.
+ // +kubebuilder:validation:Minimum=0
+ // +optional
+ DaemonProbesPort *uint32 `json:"daemonProbesPort,omitempty"`
+
+ // The port on which kuryr-controller will listen for readiness and liveness requests.
+ // +kubebuilder:validation:Minimum=0
+ // +optional
+ ControllerProbesPort *uint32 `json:"controllerProbesPort,omitempty"`
+
+ // openStackServiceNetwork contains the CIDR of the network from which to allocate IPs for
+ // OpenStack Octavia's Amphora VMs. Please note that with the Amphora driver Octavia uses
+ // two IPs from that network for each loadbalancer - one given by OpenShift and a second
+ // for VRRP connections. As the first one is managed by OpenShift's IPAM and the second
+ // by Neutron's, those need to come from different pools. Therefore `openStackServiceNetwork`
+ // needs to be at least twice the size of `serviceNetwork`, and the whole `serviceNetwork`
+ // must overlap with `openStackServiceNetwork`. cluster-network-operator will then
+ // make sure VRRP IPs are taken from the ranges inside `openStackServiceNetwork` that
+ // do not overlap with `serviceNetwork`, effectively preventing conflicts. If not set,
+ // cluster-network-operator will use `serviceNetwork` expanded by decrementing the prefix
+ // size by 1.
+ // +optional
+ OpenStackServiceNetwork string `json:"openStackServiceNetwork,omitempty"`
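Concretely: with a `serviceNetwork` of 172.30.0.0/16 and `openStackServiceNetwork` left unset, decrementing the prefix size by 1 yields 172.30.0.0/15. Its lower half (172.30.0.0/16) is exactly the OpenShift-managed service range, and its upper half (172.31.0.0/16) is left for Neutron to allocate the per-loadbalancer VRRP addresses from, which satisfies both the overlap requirement and the at-least-twice-the-size requirement described above.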
+
+ // enablePortPoolsPrepopulation when true will make Kuryr prepopulate each newly created port
+ // pool with a minimum number of ports. Kuryr uses Neutron port pooling to offset the fact
+ // that it takes a significant amount of time to create a port. Instead of creating a port
+ // when a pod is being deployed, Kuryr keeps a number of ports ready to be attached to pods. By
+ // default port prepopulation is disabled.
+ // +optional
+ EnablePortPoolsPrepopulation bool `json:"enablePortPoolsPrepopulation,omitempty"`
+
+ // poolMaxPorts sets a maximum number of free ports that are being kept in a port pool.
+ // If the number of ports exceeds this setting, free ports will get deleted. Setting 0
+ // will disable this upper bound, effectively preventing pools from shrinking; this
+ // is the default value. For more information about port pools see the
+ // enablePortPoolsPrepopulation setting.
+ // +kubebuilder:validation:Minimum=0
+ // +optional
+ PoolMaxPorts uint `json:"poolMaxPorts,omitempty"`
+
+ // poolMinPorts sets a minimum number of free ports that should be kept in a port pool.
+ // If the number of ports is lower than this setting, new ports will get created and
+ // added to the pool. The default is 1. For more information about port pools see the
+ // enablePortPoolsPrepopulation setting.
+ // +kubebuilder:validation:Minimum=1
+ // +optional
+ PoolMinPorts uint `json:"poolMinPorts,omitempty"`
+
+ // poolBatchPorts sets the number of ports that should be created in a single batch request
+ // to extend the port pool. The default is 3. For more information about port pools see the
+ // enablePortPoolsPrepopulation setting.
+ // +kubebuilder:validation:Minimum=0
+ // +optional
+ PoolBatchPorts *uint `json:"poolBatchPorts,omitempty"`
+}
+
+// OVNKubernetesConfig contains the configuration parameters for networks
+// using the ovn-kubernetes network project
+type OVNKubernetesConfig struct {
+ // mtu is the MTU to use for the tunnel interface. This must be 100
+ // bytes smaller than the uplink mtu.
+ // Default is 1400
+ // +kubebuilder:validation:Minimum=0
+ // +optional
+ MTU *uint32 `json:"mtu,omitempty"`
+ // genevePort is the UDP port to be used by geneve encapsulation.
+ // Default is 6081
+ // +kubebuilder:validation:Minimum=1
+ // +optional
+ GenevePort *uint32 `json:"genevePort,omitempty"`
+ // HybridOverlayConfig configures an additional overlay network for peers that are
+ // not using OVN.
+ // +optional
+ HybridOverlayConfig *HybridOverlayConfig `json:"hybridOverlayConfig,omitempty"`
+}
+
+type HybridOverlayConfig struct {
+ // HybridClusterNetwork defines a network space given to nodes on an additional overlay network.
+ HybridClusterNetwork []ClusterNetworkEntry `json:"hybridClusterNetwork"`
+}
+
+// NetworkType describes the network plugin type to configure
+type NetworkType string
+
+// ProxyArgumentList is a list of arguments to pass to the kubeproxy process
+type ProxyArgumentList []string
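ProxyConfig, defined next, is a thin pass-through to the kube-proxy process. A minimal sketch, assuming the vendored import path from this diff; the proxy-mode entry is just an illustrative kube-proxy argument:

package main

import (
	"fmt"

	operatorv1 "github.com/openshift/api/operator/v1"
)

func main() {
	kp := &operatorv1.ProxyConfig{
		IptablesSyncPeriod: "30s",     // matches the documented default
		BindAddress:        "0.0.0.0", // matches the documented default
		ProxyArguments: map[string]operatorv1.ProxyArgumentList{
			// Each key is a kube-proxy flag name; the value is its argument list.
			"proxy-mode": {"iptables"},
		},
	}
	fmt.Println(kp.ProxyArguments["proxy-mode"][0]) // iptables
}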
+
+// ProxyConfig defines the configuration knobs for kubeproxy.
+// All of these are optional and have sensible defaults
+type ProxyConfig struct {
+ // The period that iptables rules are refreshed.
+ // Default: 30s
+ IptablesSyncPeriod string `json:"iptablesSyncPeriod,omitempty"`
+
+ // The address to "bind" on
+ // Defaults to 0.0.0.0
+ BindAddress string `json:"bindAddress,omitempty"`
+
+ // Any additional arguments to pass to the kubeproxy process
+ ProxyArguments map[string]ProxyArgumentList `json:"proxyArguments,omitempty"`
+}
+
+const (
+ // NetworkTypeOpenShiftSDN means the openshift-sdn plugin will be configured
+ NetworkTypeOpenShiftSDN NetworkType = "OpenShiftSDN"
+
+ // NetworkTypeOVNKubernetes means the ovn-kubernetes project will be configured.
+ // This is currently not implemented.
+ NetworkTypeOVNKubernetes NetworkType = "OVNKubernetes"
+
+ // NetworkTypeKuryr means the kuryr-kubernetes project will be configured.
+ NetworkTypeKuryr NetworkType = "Kuryr"
+
+ // NetworkTypeRaw
+ NetworkTypeRaw NetworkType = "Raw"
+
+ // NetworkTypeSimpleMacvlan
+ NetworkTypeSimpleMacvlan NetworkType = "SimpleMacvlan"
+)
+
+// SDNMode is the Mode the openshift-sdn plugin is in
+type SDNMode string
+
+const (
+ // SDNModeSubnet is a simple mode that offers no isolation between pods
+ SDNModeSubnet SDNMode = "Subnet"
+
+ // SDNModeMultitenant is a special "multitenant" mode that offers limited
+ // isolation configuration between namespaces
+ SDNModeMultitenant SDNMode = "Multitenant"
+
+ // SDNModeNetworkPolicy is a full NetworkPolicy implementation that allows
+ // for sophisticated network isolation and segmenting. This is the default.
+ SDNModeNetworkPolicy SDNMode = "NetworkPolicy"
+)
+
+// MacvlanMode is the Mode of macvlan. The values are lowercase to match the CNI plugin
+// config values. See "man ip-link" for details.
+type MacvlanMode string
+
+const (
+ // MacvlanModeBridge is macvlan with a thin bridge function.
+ MacvlanModeBridge MacvlanMode = "Bridge"
+ // MacvlanModePrivate
+ MacvlanModePrivate MacvlanMode = "Private"
+ // MacvlanModeVEPA is used with a Virtual Ethernet Port Aggregator
+ // (802.1qbg) switch
+ MacvlanModeVEPA MacvlanMode = "VEPA"
+ // MacvlanModePassthru
+ MacvlanModePassthru MacvlanMode = "Passthru"
+)
+
+// IPAMType describes the IP address management type to configure
+type IPAMType string
+
+const (
+ // IPAMTypeDHCP uses DHCP for IP management
+ IPAMTypeDHCP IPAMType = "DHCP"
+ // IPAMTypeStatic uses static IP
+ IPAMTypeStatic IPAMType = "Static"
+)
diff --git a/vendor/github.com/openshift/api/operator/v1/types_openshiftapiserver.go b/vendor/github.com/openshift/api/operator/v1/types_openshiftapiserver.go
new file mode 100644
index 0000000000000000000000000000000000000000..8ab50ed321e5ab710859e99fc9a7b365077edfdd
--- /dev/null
+++ b/vendor/github.com/openshift/api/operator/v1/types_openshiftapiserver.go
@@ -0,0 +1,50 @@
+package v1
+
+import (
+ metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+)
+
+// +genclient
+// +genclient:nonNamespaced
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+
+// OpenShiftAPIServer provides information to configure an operator to manage openshift-apiserver.
+type OpenShiftAPIServer struct {
+ metav1.TypeMeta `json:",inline"`
+ metav1.ObjectMeta `json:"metadata"`
+
+ // spec is the specification of the desired behavior of the OpenShift API Server.
+ // +kubebuilder:validation:Required
+ // +required
+ Spec OpenShiftAPIServerSpec `json:"spec"`
+
+ // status defines the observed status of the OpenShift API Server.
+ // +optional
+ Status OpenShiftAPIServerStatus `json:"status"`
+}
+
+type OpenShiftAPIServerSpec struct {
+ OperatorSpec `json:",inline"`
+}
+
+type OpenShiftAPIServerStatus struct {
+ OperatorStatus `json:",inline"`
+
+ // latestAvailableRevision is the latest revision used as a suffix of revisioned
+ // secrets like encryption-config. A new revision causes a new deployment of
+ // pods.
+ // +optional
+ // +kubebuilder:validation:Minimum=0
+ LatestAvailableRevision int32 `json:"latestAvailableRevision,omitempty"`
+}
+
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+
+// OpenShiftAPIServerList is a collection of items
+type OpenShiftAPIServerList struct {
+ metav1.TypeMeta `json:",inline"`
+ metav1.ListMeta `json:"metadata"`
+
+ // Items contains the items
+ Items []OpenShiftAPIServer `json:"items"`
+}
diff --git a/vendor/github.com/openshift/api/operator/v1/types_openshiftcontrollermanager.go b/vendor/github.com/openshift/api/operator/v1/types_openshiftcontrollermanager.go
new file mode 100644
index 0000000000000000000000000000000000000000..0f23b01be27c200a107897f92a20db6283329045
--- /dev/null
+++ b/vendor/github.com/openshift/api/operator/v1/types_openshiftcontrollermanager.go
@@ -0,0 +1,40 @@
+package v1
+
+import (
+ metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+)
+
+// +genclient
+// +genclient:nonNamespaced
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+
+// OpenShiftControllerManager provides information to configure an operator to manage openshift-controller-manager.
+type OpenShiftControllerManager struct {
+ metav1.TypeMeta `json:",inline"`
+ metav1.ObjectMeta `json:"metadata"`
+
+ // +kubebuilder:validation:Required
+ // +required
+ Spec OpenShiftControllerManagerSpec `json:"spec"`
+ // +optional
+ Status OpenShiftControllerManagerStatus `json:"status"`
+}
+
+type OpenShiftControllerManagerSpec struct {
+ OperatorSpec `json:",inline"`
+}
+
+type OpenShiftControllerManagerStatus struct {
+ OperatorStatus `json:",inline"`
+}
+
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+
+// OpenShiftControllerManagerList is a collection of items
+type OpenShiftControllerManagerList struct {
+ metav1.TypeMeta `json:",inline"`
+ metav1.ListMeta `json:"metadata"`
+
+ // Items contains the items
+ Items []OpenShiftControllerManager `json:"items"`
+}
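All of the operator CRDs in this package follow the same shape: Spec/Status structs that inline the shared OperatorSpec/OperatorStatus (or their StaticPod variants), so the shared fields flatten into the serialized spec. A sketch using the KubeScheduler types from the scheduler file that follows, assuming the shared OperatorSpec.ManagementState field and Managed constant from this package's types.go (not shown in this diff):

package main

import (
	"encoding/json"
	"fmt"

	operatorv1 "github.com/openshift/api/operator/v1"
)

func main() {
	spec := operatorv1.KubeSchedulerSpec{
		StaticPodOperatorSpec: operatorv1.StaticPodOperatorSpec{
			OperatorSpec: operatorv1.OperatorSpec{
				ManagementState: operatorv1.Managed,
			},
		},
	}
	b, err := json.Marshal(spec)
	if err != nil {
		panic(err)
	}
	// Because of the json:",inline" embeddings, managementState appears at the
	// top level of the spec, e.g. {"managementState":"Managed",...}.
	fmt.Println(string(b))
}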
diff --git a/vendor/github.com/openshift/api/operator/v1/types_scheduler.go b/vendor/github.com/openshift/api/operator/v1/types_scheduler.go
new file mode 100644
index 0000000000000000000000000000000000000000..f8a542082c0fc1552a7d10bbcd37b3e8bdaf02ec
--- /dev/null
+++ b/vendor/github.com/openshift/api/operator/v1/types_scheduler.go
@@ -0,0 +1,43 @@
+package v1
+
+import (
+ metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+)
+
+// +genclient
+// +genclient:nonNamespaced
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+
+// KubeScheduler provides information to configure an operator to manage the scheduler.
+type KubeScheduler struct {
+ metav1.TypeMeta `json:",inline"`
+ metav1.ObjectMeta `json:"metadata"`
+
+ // spec is the specification of the desired behavior of the Kubernetes Scheduler
+ // +kubebuilder:validation:Required
+ // +required
+ Spec KubeSchedulerSpec `json:"spec"`
+
+ // status is the most recently observed status of the Kubernetes Scheduler
+ // +optional
+ Status KubeSchedulerStatus `json:"status"`
+}
+
+type KubeSchedulerSpec struct {
+ StaticPodOperatorSpec `json:",inline"`
+}
+
+type KubeSchedulerStatus struct {
+ StaticPodOperatorStatus `json:",inline"`
+}
+
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+
+// KubeSchedulerList is a collection of items
+type KubeSchedulerList struct {
+ metav1.TypeMeta `json:",inline"`
+ metav1.ListMeta `json:"metadata"`
+
+ // Items contains the items
+ Items []KubeScheduler `json:"items"`
+}
diff --git a/vendor/github.com/openshift/api/operator/v1/types_serviceca.go b/vendor/github.com/openshift/api/operator/v1/types_serviceca.go
new file mode 100644
index 0000000000000000000000000000000000000000..b8d5e2646a423887ec0983070ef19d268cf303a9
--- /dev/null
+++ b/vendor/github.com/openshift/api/operator/v1/types_serviceca.go
@@ -0,0 +1,42 @@
+package v1
+
+import (
+ metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+)
+
+// +genclient
+// +genclient:nonNamespaced
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+
+// ServiceCA provides information to configure an operator to manage the service cert controllers
+type ServiceCA struct {
+ metav1.TypeMeta `json:",inline"`
+ metav1.ObjectMeta `json:"metadata"`
+
+ // spec holds user settable values for configuration
+ // +kubebuilder:validation:Required
+ // +required
+ Spec ServiceCASpec `json:"spec"`
+ // status holds observed values from the cluster. They may not be overridden.
+ // +optional + Status ServiceCAStatus `json:"status"` +} + +type ServiceCASpec struct { + OperatorSpec `json:",inline"` +} + +type ServiceCAStatus struct { + OperatorStatus `json:",inline"` +} + +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// ServiceCAList is a collection of items +type ServiceCAList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata"` + + // Items contains the items + Items []ServiceCA `json:"items"` +} diff --git a/vendor/github.com/openshift/api/operator/v1/types_servicecatalogapiserver.go b/vendor/github.com/openshift/api/operator/v1/types_servicecatalogapiserver.go new file mode 100644 index 0000000000000000000000000000000000000000..7c1a857bb61bd98e3ce4a3ff9721e839c5fd32eb --- /dev/null +++ b/vendor/github.com/openshift/api/operator/v1/types_servicecatalogapiserver.go @@ -0,0 +1,40 @@ +package v1 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +// +genclient +// +genclient:nonNamespaced +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// ServiceCatalogAPIServer provides information to configure an operator to manage Service Catalog API Server +type ServiceCatalogAPIServer struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty"` + + // +kubebuilder:validation:Required + // +required + Spec ServiceCatalogAPIServerSpec `json:"spec"` + // +optional + Status ServiceCatalogAPIServerStatus `json:"status"` +} + +type ServiceCatalogAPIServerSpec struct { + OperatorSpec `json:",inline"` +} + +type ServiceCatalogAPIServerStatus struct { + OperatorStatus `json:",inline"` +} + +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// ServiceCatalogAPIServerList is a collection of items +type ServiceCatalogAPIServerList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata"` + + // Items contains the items + Items []ServiceCatalogAPIServer `json:"items"` +} diff --git a/vendor/github.com/openshift/api/operator/v1/types_servicecatalogcontrollermanager.go b/vendor/github.com/openshift/api/operator/v1/types_servicecatalogcontrollermanager.go new file mode 100644 index 0000000000000000000000000000000000000000..ac3bf5898c8676410605a3ff5605c9c1b8372e05 --- /dev/null +++ b/vendor/github.com/openshift/api/operator/v1/types_servicecatalogcontrollermanager.go @@ -0,0 +1,40 @@ +package v1 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +// +genclient +// +genclient:nonNamespaced +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// ServiceCatalogControllerManager provides information to configure an operator to manage Service Catalog Controller Manager +type ServiceCatalogControllerManager struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata"` + + // +kubebuilder:validation:Required + // +required + Spec ServiceCatalogControllerManagerSpec `json:"spec"` + // +optional + Status ServiceCatalogControllerManagerStatus `json:"status"` +} + +type ServiceCatalogControllerManagerSpec struct { + OperatorSpec `json:",inline"` +} + +type ServiceCatalogControllerManagerStatus struct { + OperatorStatus `json:",inline"` +} + +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// ServiceCatalogControllerManagerList is a collection of items +type ServiceCatalogControllerManagerList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata"` + + // Items contains the items + Items []ServiceCatalogControllerManager 
`json:"items"` +} diff --git a/vendor/github.com/openshift/api/operator/v1/zz_generated.deepcopy.go b/vendor/github.com/openshift/api/operator/v1/zz_generated.deepcopy.go new file mode 100644 index 0000000000000000000000000000000000000000..158308a3e3af34e31caba483c651322d35e33544 --- /dev/null +++ b/vendor/github.com/openshift/api/operator/v1/zz_generated.deepcopy.go @@ -0,0 +1,2437 @@ +// +build !ignore_autogenerated + +// Code generated by deepcopy-gen. DO NOT EDIT. + +package v1 + +import ( + configv1 "github.com/openshift/api/config/v1" + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + runtime "k8s.io/apimachinery/pkg/runtime" +) + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *AdditionalNetworkDefinition) DeepCopyInto(out *AdditionalNetworkDefinition) { + *out = *in + if in.SimpleMacvlanConfig != nil { + in, out := &in.SimpleMacvlanConfig, &out.SimpleMacvlanConfig + *out = new(SimpleMacvlanConfig) + (*in).DeepCopyInto(*out) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AdditionalNetworkDefinition. +func (in *AdditionalNetworkDefinition) DeepCopy() *AdditionalNetworkDefinition { + if in == nil { + return nil + } + out := new(AdditionalNetworkDefinition) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *Authentication) DeepCopyInto(out *Authentication) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Authentication. +func (in *Authentication) DeepCopy() *Authentication { + if in == nil { + return nil + } + out := new(Authentication) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *Authentication) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *AuthenticationList) DeepCopyInto(out *AuthenticationList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]Authentication, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AuthenticationList. +func (in *AuthenticationList) DeepCopy() *AuthenticationList { + if in == nil { + return nil + } + out := new(AuthenticationList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *AuthenticationList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *AuthenticationSpec) DeepCopyInto(out *AuthenticationSpec) { + *out = *in + in.OperatorSpec.DeepCopyInto(&out.OperatorSpec) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AuthenticationSpec. +func (in *AuthenticationSpec) DeepCopy() *AuthenticationSpec { + if in == nil { + return nil + } + out := new(AuthenticationSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *AuthenticationStatus) DeepCopyInto(out *AuthenticationStatus) { + *out = *in + in.OperatorStatus.DeepCopyInto(&out.OperatorStatus) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AuthenticationStatus. +func (in *AuthenticationStatus) DeepCopy() *AuthenticationStatus { + if in == nil { + return nil + } + out := new(AuthenticationStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *CSISnapshotController) DeepCopyInto(out *CSISnapshotController) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CSISnapshotController. +func (in *CSISnapshotController) DeepCopy() *CSISnapshotController { + if in == nil { + return nil + } + out := new(CSISnapshotController) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *CSISnapshotController) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *CSISnapshotControllerList) DeepCopyInto(out *CSISnapshotControllerList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]CSISnapshotController, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CSISnapshotControllerList. +func (in *CSISnapshotControllerList) DeepCopy() *CSISnapshotControllerList { + if in == nil { + return nil + } + out := new(CSISnapshotControllerList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *CSISnapshotControllerList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *CSISnapshotControllerSpec) DeepCopyInto(out *CSISnapshotControllerSpec) { + *out = *in + in.OperatorSpec.DeepCopyInto(&out.OperatorSpec) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CSISnapshotControllerSpec. 
+func (in *CSISnapshotControllerSpec) DeepCopy() *CSISnapshotControllerSpec { + if in == nil { + return nil + } + out := new(CSISnapshotControllerSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *CSISnapshotControllerStatus) DeepCopyInto(out *CSISnapshotControllerStatus) { + *out = *in + in.OperatorStatus.DeepCopyInto(&out.OperatorStatus) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CSISnapshotControllerStatus. +func (in *CSISnapshotControllerStatus) DeepCopy() *CSISnapshotControllerStatus { + if in == nil { + return nil + } + out := new(CSISnapshotControllerStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ClusterNetworkEntry) DeepCopyInto(out *ClusterNetworkEntry) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClusterNetworkEntry. +func (in *ClusterNetworkEntry) DeepCopy() *ClusterNetworkEntry { + if in == nil { + return nil + } + out := new(ClusterNetworkEntry) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *Console) DeepCopyInto(out *Console) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Console. +func (in *Console) DeepCopy() *Console { + if in == nil { + return nil + } + out := new(Console) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *Console) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ConsoleCustomization) DeepCopyInto(out *ConsoleCustomization) { + *out = *in + out.CustomLogoFile = in.CustomLogoFile + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ConsoleCustomization. +func (in *ConsoleCustomization) DeepCopy() *ConsoleCustomization { + if in == nil { + return nil + } + out := new(ConsoleCustomization) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ConsoleList) DeepCopyInto(out *ConsoleList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]Console, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ConsoleList. +func (in *ConsoleList) DeepCopy() *ConsoleList { + if in == nil { + return nil + } + out := new(ConsoleList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. 
+func (in *ConsoleList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ConsoleProviders) DeepCopyInto(out *ConsoleProviders) { + *out = *in + if in.Statuspage != nil { + in, out := &in.Statuspage, &out.Statuspage + *out = new(StatuspageProvider) + **out = **in + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ConsoleProviders. +func (in *ConsoleProviders) DeepCopy() *ConsoleProviders { + if in == nil { + return nil + } + out := new(ConsoleProviders) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ConsoleSpec) DeepCopyInto(out *ConsoleSpec) { + *out = *in + in.OperatorSpec.DeepCopyInto(&out.OperatorSpec) + out.Customization = in.Customization + in.Providers.DeepCopyInto(&out.Providers) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ConsoleSpec. +func (in *ConsoleSpec) DeepCopy() *ConsoleSpec { + if in == nil { + return nil + } + out := new(ConsoleSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ConsoleStatus) DeepCopyInto(out *ConsoleStatus) { + *out = *in + in.OperatorStatus.DeepCopyInto(&out.OperatorStatus) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ConsoleStatus. +func (in *ConsoleStatus) DeepCopy() *ConsoleStatus { + if in == nil { + return nil + } + out := new(ConsoleStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DNS) DeepCopyInto(out *DNS) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DNS. +func (in *DNS) DeepCopy() *DNS { + if in == nil { + return nil + } + out := new(DNS) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *DNS) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DNSList) DeepCopyInto(out *DNSList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]DNS, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DNSList. +func (in *DNSList) DeepCopy() *DNSList { + if in == nil { + return nil + } + out := new(DNSList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. 
+func (in *DNSList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DNSSpec) DeepCopyInto(out *DNSSpec) { + *out = *in + if in.Servers != nil { + in, out := &in.Servers, &out.Servers + *out = make([]Server, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DNSSpec. +func (in *DNSSpec) DeepCopy() *DNSSpec { + if in == nil { + return nil + } + out := new(DNSSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DNSStatus) DeepCopyInto(out *DNSStatus) { + *out = *in + if in.Conditions != nil { + in, out := &in.Conditions, &out.Conditions + *out = make([]OperatorCondition, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DNSStatus. +func (in *DNSStatus) DeepCopy() *DNSStatus { + if in == nil { + return nil + } + out := new(DNSStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DefaultNetworkDefinition) DeepCopyInto(out *DefaultNetworkDefinition) { + *out = *in + if in.OpenShiftSDNConfig != nil { + in, out := &in.OpenShiftSDNConfig, &out.OpenShiftSDNConfig + *out = new(OpenShiftSDNConfig) + (*in).DeepCopyInto(*out) + } + if in.OVNKubernetesConfig != nil { + in, out := &in.OVNKubernetesConfig, &out.OVNKubernetesConfig + *out = new(OVNKubernetesConfig) + (*in).DeepCopyInto(*out) + } + if in.KuryrConfig != nil { + in, out := &in.KuryrConfig, &out.KuryrConfig + *out = new(KuryrConfig) + (*in).DeepCopyInto(*out) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DefaultNetworkDefinition. +func (in *DefaultNetworkDefinition) DeepCopy() *DefaultNetworkDefinition { + if in == nil { + return nil + } + out := new(DefaultNetworkDefinition) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *EndpointPublishingStrategy) DeepCopyInto(out *EndpointPublishingStrategy) { + *out = *in + if in.LoadBalancer != nil { + in, out := &in.LoadBalancer, &out.LoadBalancer + *out = new(LoadBalancerStrategy) + **out = **in + } + if in.HostNetwork != nil { + in, out := &in.HostNetwork, &out.HostNetwork + *out = new(HostNetworkStrategy) + **out = **in + } + if in.Private != nil { + in, out := &in.Private, &out.Private + *out = new(PrivateStrategy) + **out = **in + } + if in.NodePort != nil { + in, out := &in.NodePort, &out.NodePort + *out = new(NodePortStrategy) + **out = **in + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new EndpointPublishingStrategy. +func (in *EndpointPublishingStrategy) DeepCopy() *EndpointPublishingStrategy { + if in == nil { + return nil + } + out := new(EndpointPublishingStrategy) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *Etcd) DeepCopyInto(out *Etcd) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Etcd. +func (in *Etcd) DeepCopy() *Etcd { + if in == nil { + return nil + } + out := new(Etcd) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *Etcd) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *EtcdList) DeepCopyInto(out *EtcdList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]Etcd, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new EtcdList. +func (in *EtcdList) DeepCopy() *EtcdList { + if in == nil { + return nil + } + out := new(EtcdList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *EtcdList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *EtcdSpec) DeepCopyInto(out *EtcdSpec) { + *out = *in + in.StaticPodOperatorSpec.DeepCopyInto(&out.StaticPodOperatorSpec) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new EtcdSpec. +func (in *EtcdSpec) DeepCopy() *EtcdSpec { + if in == nil { + return nil + } + out := new(EtcdSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *EtcdStatus) DeepCopyInto(out *EtcdStatus) { + *out = *in + in.StaticPodOperatorStatus.DeepCopyInto(&out.StaticPodOperatorStatus) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new EtcdStatus. +func (in *EtcdStatus) DeepCopy() *EtcdStatus { + if in == nil { + return nil + } + out := new(EtcdStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ForwardPlugin) DeepCopyInto(out *ForwardPlugin) { + *out = *in + if in.Upstreams != nil { + in, out := &in.Upstreams, &out.Upstreams + *out = make([]string, len(*in)) + copy(*out, *in) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ForwardPlugin. +func (in *ForwardPlugin) DeepCopy() *ForwardPlugin { + if in == nil { + return nil + } + out := new(ForwardPlugin) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *GenerationStatus) DeepCopyInto(out *GenerationStatus) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new GenerationStatus. 
+func (in *GenerationStatus) DeepCopy() *GenerationStatus { + if in == nil { + return nil + } + out := new(GenerationStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *HostNetworkStrategy) DeepCopyInto(out *HostNetworkStrategy) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HostNetworkStrategy. +func (in *HostNetworkStrategy) DeepCopy() *HostNetworkStrategy { + if in == nil { + return nil + } + out := new(HostNetworkStrategy) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *HybridOverlayConfig) DeepCopyInto(out *HybridOverlayConfig) { + *out = *in + if in.HybridClusterNetwork != nil { + in, out := &in.HybridClusterNetwork, &out.HybridClusterNetwork + *out = make([]ClusterNetworkEntry, len(*in)) + copy(*out, *in) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HybridOverlayConfig. +func (in *HybridOverlayConfig) DeepCopy() *HybridOverlayConfig { + if in == nil { + return nil + } + out := new(HybridOverlayConfig) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *IPAMConfig) DeepCopyInto(out *IPAMConfig) { + *out = *in + if in.StaticIPAMConfig != nil { + in, out := &in.StaticIPAMConfig, &out.StaticIPAMConfig + *out = new(StaticIPAMConfig) + (*in).DeepCopyInto(*out) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IPAMConfig. +func (in *IPAMConfig) DeepCopy() *IPAMConfig { + if in == nil { + return nil + } + out := new(IPAMConfig) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *IngressController) DeepCopyInto(out *IngressController) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IngressController. +func (in *IngressController) DeepCopy() *IngressController { + if in == nil { + return nil + } + out := new(IngressController) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *IngressController) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *IngressControllerList) DeepCopyInto(out *IngressControllerList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]IngressController, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IngressControllerList. 
+func (in *IngressControllerList) DeepCopy() *IngressControllerList { + if in == nil { + return nil + } + out := new(IngressControllerList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *IngressControllerList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *IngressControllerSpec) DeepCopyInto(out *IngressControllerSpec) { + *out = *in + if in.Replicas != nil { + in, out := &in.Replicas, &out.Replicas + *out = new(int32) + **out = **in + } + if in.EndpointPublishingStrategy != nil { + in, out := &in.EndpointPublishingStrategy, &out.EndpointPublishingStrategy + *out = new(EndpointPublishingStrategy) + (*in).DeepCopyInto(*out) + } + if in.DefaultCertificate != nil { + in, out := &in.DefaultCertificate, &out.DefaultCertificate + *out = new(corev1.LocalObjectReference) + **out = **in + } + if in.NamespaceSelector != nil { + in, out := &in.NamespaceSelector, &out.NamespaceSelector + *out = new(metav1.LabelSelector) + (*in).DeepCopyInto(*out) + } + if in.RouteSelector != nil { + in, out := &in.RouteSelector, &out.RouteSelector + *out = new(metav1.LabelSelector) + (*in).DeepCopyInto(*out) + } + if in.NodePlacement != nil { + in, out := &in.NodePlacement, &out.NodePlacement + *out = new(NodePlacement) + (*in).DeepCopyInto(*out) + } + if in.TLSSecurityProfile != nil { + in, out := &in.TLSSecurityProfile, &out.TLSSecurityProfile + *out = new(configv1.TLSSecurityProfile) + (*in).DeepCopyInto(*out) + } + if in.RouteAdmission != nil { + in, out := &in.RouteAdmission, &out.RouteAdmission + *out = new(RouteAdmissionPolicy) + **out = **in + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IngressControllerSpec. +func (in *IngressControllerSpec) DeepCopy() *IngressControllerSpec { + if in == nil { + return nil + } + out := new(IngressControllerSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *IngressControllerStatus) DeepCopyInto(out *IngressControllerStatus) { + *out = *in + if in.EndpointPublishingStrategy != nil { + in, out := &in.EndpointPublishingStrategy, &out.EndpointPublishingStrategy + *out = new(EndpointPublishingStrategy) + (*in).DeepCopyInto(*out) + } + if in.Conditions != nil { + in, out := &in.Conditions, &out.Conditions + *out = make([]OperatorCondition, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.TLSProfile != nil { + in, out := &in.TLSProfile, &out.TLSProfile + *out = new(configv1.TLSProfileSpec) + (*in).DeepCopyInto(*out) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IngressControllerStatus. +func (in *IngressControllerStatus) DeepCopy() *IngressControllerStatus { + if in == nil { + return nil + } + out := new(IngressControllerStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *KubeAPIServer) DeepCopyInto(out *KubeAPIServer) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new KubeAPIServer. +func (in *KubeAPIServer) DeepCopy() *KubeAPIServer { + if in == nil { + return nil + } + out := new(KubeAPIServer) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *KubeAPIServer) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *KubeAPIServerList) DeepCopyInto(out *KubeAPIServerList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]KubeAPIServer, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new KubeAPIServerList. +func (in *KubeAPIServerList) DeepCopy() *KubeAPIServerList { + if in == nil { + return nil + } + out := new(KubeAPIServerList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *KubeAPIServerList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *KubeAPIServerSpec) DeepCopyInto(out *KubeAPIServerSpec) { + *out = *in + in.StaticPodOperatorSpec.DeepCopyInto(&out.StaticPodOperatorSpec) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new KubeAPIServerSpec. +func (in *KubeAPIServerSpec) DeepCopy() *KubeAPIServerSpec { + if in == nil { + return nil + } + out := new(KubeAPIServerSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *KubeAPIServerStatus) DeepCopyInto(out *KubeAPIServerStatus) { + *out = *in + in.StaticPodOperatorStatus.DeepCopyInto(&out.StaticPodOperatorStatus) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new KubeAPIServerStatus. +func (in *KubeAPIServerStatus) DeepCopy() *KubeAPIServerStatus { + if in == nil { + return nil + } + out := new(KubeAPIServerStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *KubeControllerManager) DeepCopyInto(out *KubeControllerManager) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new KubeControllerManager. 
+func (in *KubeControllerManager) DeepCopy() *KubeControllerManager { + if in == nil { + return nil + } + out := new(KubeControllerManager) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *KubeControllerManager) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *KubeControllerManagerList) DeepCopyInto(out *KubeControllerManagerList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]KubeControllerManager, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new KubeControllerManagerList. +func (in *KubeControllerManagerList) DeepCopy() *KubeControllerManagerList { + if in == nil { + return nil + } + out := new(KubeControllerManagerList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *KubeControllerManagerList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *KubeControllerManagerSpec) DeepCopyInto(out *KubeControllerManagerSpec) { + *out = *in + in.StaticPodOperatorSpec.DeepCopyInto(&out.StaticPodOperatorSpec) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new KubeControllerManagerSpec. +func (in *KubeControllerManagerSpec) DeepCopy() *KubeControllerManagerSpec { + if in == nil { + return nil + } + out := new(KubeControllerManagerSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *KubeControllerManagerStatus) DeepCopyInto(out *KubeControllerManagerStatus) { + *out = *in + in.StaticPodOperatorStatus.DeepCopyInto(&out.StaticPodOperatorStatus) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new KubeControllerManagerStatus. +func (in *KubeControllerManagerStatus) DeepCopy() *KubeControllerManagerStatus { + if in == nil { + return nil + } + out := new(KubeControllerManagerStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *KubeScheduler) DeepCopyInto(out *KubeScheduler) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new KubeScheduler. +func (in *KubeScheduler) DeepCopy() *KubeScheduler { + if in == nil { + return nil + } + out := new(KubeScheduler) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. 
+func (in *KubeScheduler) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *KubeSchedulerList) DeepCopyInto(out *KubeSchedulerList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]KubeScheduler, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new KubeSchedulerList. +func (in *KubeSchedulerList) DeepCopy() *KubeSchedulerList { + if in == nil { + return nil + } + out := new(KubeSchedulerList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *KubeSchedulerList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *KubeSchedulerSpec) DeepCopyInto(out *KubeSchedulerSpec) { + *out = *in + in.StaticPodOperatorSpec.DeepCopyInto(&out.StaticPodOperatorSpec) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new KubeSchedulerSpec. +func (in *KubeSchedulerSpec) DeepCopy() *KubeSchedulerSpec { + if in == nil { + return nil + } + out := new(KubeSchedulerSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *KubeSchedulerStatus) DeepCopyInto(out *KubeSchedulerStatus) { + *out = *in + in.StaticPodOperatorStatus.DeepCopyInto(&out.StaticPodOperatorStatus) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new KubeSchedulerStatus. +func (in *KubeSchedulerStatus) DeepCopy() *KubeSchedulerStatus { + if in == nil { + return nil + } + out := new(KubeSchedulerStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *KubeStorageVersionMigrator) DeepCopyInto(out *KubeStorageVersionMigrator) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new KubeStorageVersionMigrator. +func (in *KubeStorageVersionMigrator) DeepCopy() *KubeStorageVersionMigrator { + if in == nil { + return nil + } + out := new(KubeStorageVersionMigrator) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *KubeStorageVersionMigrator) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *KubeStorageVersionMigratorList) DeepCopyInto(out *KubeStorageVersionMigratorList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]KubeStorageVersionMigrator, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new KubeStorageVersionMigratorList. +func (in *KubeStorageVersionMigratorList) DeepCopy() *KubeStorageVersionMigratorList { + if in == nil { + return nil + } + out := new(KubeStorageVersionMigratorList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *KubeStorageVersionMigratorList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *KubeStorageVersionMigratorSpec) DeepCopyInto(out *KubeStorageVersionMigratorSpec) { + *out = *in + in.OperatorSpec.DeepCopyInto(&out.OperatorSpec) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new KubeStorageVersionMigratorSpec. +func (in *KubeStorageVersionMigratorSpec) DeepCopy() *KubeStorageVersionMigratorSpec { + if in == nil { + return nil + } + out := new(KubeStorageVersionMigratorSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *KubeStorageVersionMigratorStatus) DeepCopyInto(out *KubeStorageVersionMigratorStatus) { + *out = *in + in.OperatorStatus.DeepCopyInto(&out.OperatorStatus) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new KubeStorageVersionMigratorStatus. +func (in *KubeStorageVersionMigratorStatus) DeepCopy() *KubeStorageVersionMigratorStatus { + if in == nil { + return nil + } + out := new(KubeStorageVersionMigratorStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *KuryrConfig) DeepCopyInto(out *KuryrConfig) { + *out = *in + if in.DaemonProbesPort != nil { + in, out := &in.DaemonProbesPort, &out.DaemonProbesPort + *out = new(uint32) + **out = **in + } + if in.ControllerProbesPort != nil { + in, out := &in.ControllerProbesPort, &out.ControllerProbesPort + *out = new(uint32) + **out = **in + } + if in.PoolBatchPorts != nil { + in, out := &in.PoolBatchPorts, &out.PoolBatchPorts + *out = new(uint) + **out = **in + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new KuryrConfig. +func (in *KuryrConfig) DeepCopy() *KuryrConfig { + if in == nil { + return nil + } + out := new(KuryrConfig) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *LoadBalancerStrategy) DeepCopyInto(out *LoadBalancerStrategy) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LoadBalancerStrategy. 
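`KuryrConfig.DeepCopyInto` above allocates new `uint32`/`uint` values rather than sharing pointers, because in these APIs a nil pointer means "unset". A hedged sketch of that optional-scalar convention; the helper and port value are invented for illustration:

```go
package main

import "fmt"

// describe reports whether an optional port override is set; nil means
// "unset", so the operator falls back to its built-in default.
func describe(port *uint32) string {
	if port == nil {
		return "unset (operator default applies)"
	}
	return fmt.Sprintf("set to %d", *port)
}

func main() {
	var unset *uint32
	override := uint32(8090) // arbitrary example value
	fmt.Println(describe(unset))     // unset (operator default applies)
	fmt.Println(describe(&override)) // set to 8090
}
```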
+func (in *LoadBalancerStrategy) DeepCopy() *LoadBalancerStrategy { + if in == nil { + return nil + } + out := new(LoadBalancerStrategy) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *MyOperatorResource) DeepCopyInto(out *MyOperatorResource) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MyOperatorResource. +func (in *MyOperatorResource) DeepCopy() *MyOperatorResource { + if in == nil { + return nil + } + out := new(MyOperatorResource) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *MyOperatorResourceSpec) DeepCopyInto(out *MyOperatorResourceSpec) { + *out = *in + in.OperatorSpec.DeepCopyInto(&out.OperatorSpec) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MyOperatorResourceSpec. +func (in *MyOperatorResourceSpec) DeepCopy() *MyOperatorResourceSpec { + if in == nil { + return nil + } + out := new(MyOperatorResourceSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *MyOperatorResourceStatus) DeepCopyInto(out *MyOperatorResourceStatus) { + *out = *in + in.OperatorStatus.DeepCopyInto(&out.OperatorStatus) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MyOperatorResourceStatus. +func (in *MyOperatorResourceStatus) DeepCopy() *MyOperatorResourceStatus { + if in == nil { + return nil + } + out := new(MyOperatorResourceStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *Network) DeepCopyInto(out *Network) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + out.Status = in.Status + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Network. +func (in *Network) DeepCopy() *Network { + if in == nil { + return nil + } + out := new(Network) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *Network) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *NetworkList) DeepCopyInto(out *NetworkList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]Network, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NetworkList. 
+func (in *NetworkList) DeepCopy() *NetworkList { + if in == nil { + return nil + } + out := new(NetworkList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *NetworkList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *NetworkSpec) DeepCopyInto(out *NetworkSpec) { + *out = *in + if in.ClusterNetwork != nil { + in, out := &in.ClusterNetwork, &out.ClusterNetwork + *out = make([]ClusterNetworkEntry, len(*in)) + copy(*out, *in) + } + if in.ServiceNetwork != nil { + in, out := &in.ServiceNetwork, &out.ServiceNetwork + *out = make([]string, len(*in)) + copy(*out, *in) + } + in.DefaultNetwork.DeepCopyInto(&out.DefaultNetwork) + if in.AdditionalNetworks != nil { + in, out := &in.AdditionalNetworks, &out.AdditionalNetworks + *out = make([]AdditionalNetworkDefinition, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.DisableMultiNetwork != nil { + in, out := &in.DisableMultiNetwork, &out.DisableMultiNetwork + *out = new(bool) + **out = **in + } + if in.DeployKubeProxy != nil { + in, out := &in.DeployKubeProxy, &out.DeployKubeProxy + *out = new(bool) + **out = **in + } + if in.KubeProxyConfig != nil { + in, out := &in.KubeProxyConfig, &out.KubeProxyConfig + *out = new(ProxyConfig) + (*in).DeepCopyInto(*out) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NetworkSpec. +func (in *NetworkSpec) DeepCopy() *NetworkSpec { + if in == nil { + return nil + } + out := new(NetworkSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *NetworkStatus) DeepCopyInto(out *NetworkStatus) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NetworkStatus. +func (in *NetworkStatus) DeepCopy() *NetworkStatus { + if in == nil { + return nil + } + out := new(NetworkStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *NodePlacement) DeepCopyInto(out *NodePlacement) { + *out = *in + if in.NodeSelector != nil { + in, out := &in.NodeSelector, &out.NodeSelector + *out = new(metav1.LabelSelector) + (*in).DeepCopyInto(*out) + } + if in.Tolerations != nil { + in, out := &in.Tolerations, &out.Tolerations + *out = make([]corev1.Toleration, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NodePlacement. +func (in *NodePlacement) DeepCopy() *NodePlacement { + if in == nil { + return nil + } + out := new(NodePlacement) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *NodePortStrategy) DeepCopyInto(out *NodePortStrategy) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NodePortStrategy. 
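Note the two slice strategies in `NetworkSpec.DeepCopyInto` above: a plain `copy()` for `[]ClusterNetworkEntry` versus an element-wise `DeepCopyInto` loop for `[]AdditionalNetworkDefinition`. The generator emits `copy()` only when the element type holds no pointer-shaped fields, as this sketch with a stand-in type (field names assumed, not taken from this diff) shows:

```go
package main

import "fmt"

// Stand-in for the vendored element type; the real field names may differ.
type clusterNetworkEntry struct {
	CIDR       string
	HostPrefix uint32
}

func main() {
	in := []clusterNetworkEntry{{CIDR: "10.128.0.0/14", HostPrefix: 23}}
	out := make([]clusterNetworkEntry, len(in))
	// copy() is sufficient here: the element type holds only value fields,
	// so a shallow element copy cannot alias the source slice.
	copy(out, in)
	out[0].CIDR = "10.0.0.0/16"
	fmt.Println(in[0].CIDR) // still "10.128.0.0/14"
}
```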
+func (in *NodePortStrategy) DeepCopy() *NodePortStrategy { + if in == nil { + return nil + } + out := new(NodePortStrategy) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *NodeStatus) DeepCopyInto(out *NodeStatus) { + *out = *in + if in.LastFailedRevisionErrors != nil { + in, out := &in.LastFailedRevisionErrors, &out.LastFailedRevisionErrors + *out = make([]string, len(*in)) + copy(*out, *in) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NodeStatus. +func (in *NodeStatus) DeepCopy() *NodeStatus { + if in == nil { + return nil + } + out := new(NodeStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *OVNKubernetesConfig) DeepCopyInto(out *OVNKubernetesConfig) { + *out = *in + if in.MTU != nil { + in, out := &in.MTU, &out.MTU + *out = new(uint32) + **out = **in + } + if in.GenevePort != nil { + in, out := &in.GenevePort, &out.GenevePort + *out = new(uint32) + **out = **in + } + if in.HybridOverlayConfig != nil { + in, out := &in.HybridOverlayConfig, &out.HybridOverlayConfig + *out = new(HybridOverlayConfig) + (*in).DeepCopyInto(*out) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OVNKubernetesConfig. +func (in *OVNKubernetesConfig) DeepCopy() *OVNKubernetesConfig { + if in == nil { + return nil + } + out := new(OVNKubernetesConfig) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *OpenShiftAPIServer) DeepCopyInto(out *OpenShiftAPIServer) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OpenShiftAPIServer. +func (in *OpenShiftAPIServer) DeepCopy() *OpenShiftAPIServer { + if in == nil { + return nil + } + out := new(OpenShiftAPIServer) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *OpenShiftAPIServer) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *OpenShiftAPIServerList) DeepCopyInto(out *OpenShiftAPIServerList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]OpenShiftAPIServer, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OpenShiftAPIServerList. +func (in *OpenShiftAPIServerList) DeepCopy() *OpenShiftAPIServerList { + if in == nil { + return nil + } + out := new(OpenShiftAPIServerList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. 
+func (in *OpenShiftAPIServerList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *OpenShiftAPIServerSpec) DeepCopyInto(out *OpenShiftAPIServerSpec) { + *out = *in + in.OperatorSpec.DeepCopyInto(&out.OperatorSpec) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OpenShiftAPIServerSpec. +func (in *OpenShiftAPIServerSpec) DeepCopy() *OpenShiftAPIServerSpec { + if in == nil { + return nil + } + out := new(OpenShiftAPIServerSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *OpenShiftAPIServerStatus) DeepCopyInto(out *OpenShiftAPIServerStatus) { + *out = *in + in.OperatorStatus.DeepCopyInto(&out.OperatorStatus) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OpenShiftAPIServerStatus. +func (in *OpenShiftAPIServerStatus) DeepCopy() *OpenShiftAPIServerStatus { + if in == nil { + return nil + } + out := new(OpenShiftAPIServerStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *OpenShiftControllerManager) DeepCopyInto(out *OpenShiftControllerManager) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OpenShiftControllerManager. +func (in *OpenShiftControllerManager) DeepCopy() *OpenShiftControllerManager { + if in == nil { + return nil + } + out := new(OpenShiftControllerManager) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *OpenShiftControllerManager) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *OpenShiftControllerManagerList) DeepCopyInto(out *OpenShiftControllerManagerList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]OpenShiftControllerManager, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OpenShiftControllerManagerList. +func (in *OpenShiftControllerManagerList) DeepCopy() *OpenShiftControllerManagerList { + if in == nil { + return nil + } + out := new(OpenShiftControllerManagerList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *OpenShiftControllerManagerList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *OpenShiftControllerManagerSpec) DeepCopyInto(out *OpenShiftControllerManagerSpec) { + *out = *in + in.OperatorSpec.DeepCopyInto(&out.OperatorSpec) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OpenShiftControllerManagerSpec. +func (in *OpenShiftControllerManagerSpec) DeepCopy() *OpenShiftControllerManagerSpec { + if in == nil { + return nil + } + out := new(OpenShiftControllerManagerSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *OpenShiftControllerManagerStatus) DeepCopyInto(out *OpenShiftControllerManagerStatus) { + *out = *in + in.OperatorStatus.DeepCopyInto(&out.OperatorStatus) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OpenShiftControllerManagerStatus. +func (in *OpenShiftControllerManagerStatus) DeepCopy() *OpenShiftControllerManagerStatus { + if in == nil { + return nil + } + out := new(OpenShiftControllerManagerStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *OpenShiftSDNConfig) DeepCopyInto(out *OpenShiftSDNConfig) { + *out = *in + if in.VXLANPort != nil { + in, out := &in.VXLANPort, &out.VXLANPort + *out = new(uint32) + **out = **in + } + if in.MTU != nil { + in, out := &in.MTU, &out.MTU + *out = new(uint32) + **out = **in + } + if in.UseExternalOpenvswitch != nil { + in, out := &in.UseExternalOpenvswitch, &out.UseExternalOpenvswitch + *out = new(bool) + **out = **in + } + if in.EnableUnidling != nil { + in, out := &in.EnableUnidling, &out.EnableUnidling + *out = new(bool) + **out = **in + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OpenShiftSDNConfig. +func (in *OpenShiftSDNConfig) DeepCopy() *OpenShiftSDNConfig { + if in == nil { + return nil + } + out := new(OpenShiftSDNConfig) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *OperatorCondition) DeepCopyInto(out *OperatorCondition) { + *out = *in + in.LastTransitionTime.DeepCopyInto(&out.LastTransitionTime) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OperatorCondition. +func (in *OperatorCondition) DeepCopy() *OperatorCondition { + if in == nil { + return nil + } + out := new(OperatorCondition) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *OperatorSpec) DeepCopyInto(out *OperatorSpec) { + *out = *in + in.UnsupportedConfigOverrides.DeepCopyInto(&out.UnsupportedConfigOverrides) + in.ObservedConfig.DeepCopyInto(&out.ObservedConfig) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OperatorSpec. +func (in *OperatorSpec) DeepCopy() *OperatorSpec { + if in == nil { + return nil + } + out := new(OperatorSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *OperatorStatus) DeepCopyInto(out *OperatorStatus) { + *out = *in + if in.Conditions != nil { + in, out := &in.Conditions, &out.Conditions + *out = make([]OperatorCondition, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Generations != nil { + in, out := &in.Generations, &out.Generations + *out = make([]GenerationStatus, len(*in)) + copy(*out, *in) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OperatorStatus. +func (in *OperatorStatus) DeepCopy() *OperatorStatus { + if in == nil { + return nil + } + out := new(OperatorStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PrivateStrategy) DeepCopyInto(out *PrivateStrategy) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PrivateStrategy. +func (in *PrivateStrategy) DeepCopy() *PrivateStrategy { + if in == nil { + return nil + } + out := new(PrivateStrategy) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in ProxyArgumentList) DeepCopyInto(out *ProxyArgumentList) { + { + in := &in + *out = make(ProxyArgumentList, len(*in)) + copy(*out, *in) + return + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ProxyArgumentList. +func (in ProxyArgumentList) DeepCopy() ProxyArgumentList { + if in == nil { + return nil + } + out := new(ProxyArgumentList) + in.DeepCopyInto(out) + return *out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ProxyConfig) DeepCopyInto(out *ProxyConfig) { + *out = *in + if in.ProxyArguments != nil { + in, out := &in.ProxyArguments, &out.ProxyArguments + *out = make(map[string]ProxyArgumentList, len(*in)) + for key, val := range *in { + var outVal []string + if val == nil { + (*out)[key] = nil + } else { + in, out := &val, &outVal + *out = make(ProxyArgumentList, len(*in)) + copy(*out, *in) + } + (*out)[key] = outVal + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ProxyConfig. +func (in *ProxyConfig) DeepCopy() *ProxyConfig { + if in == nil { + return nil + } + out := new(ProxyConfig) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *RouteAdmissionPolicy) DeepCopyInto(out *RouteAdmissionPolicy) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RouteAdmissionPolicy. +func (in *RouteAdmissionPolicy) DeepCopy() *RouteAdmissionPolicy { + if in == nil { + return nil + } + out := new(RouteAdmissionPolicy) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *Server) DeepCopyInto(out *Server) { + *out = *in + if in.Zones != nil { + in, out := &in.Zones, &out.Zones + *out = make([]string, len(*in)) + copy(*out, *in) + } + in.ForwardPlugin.DeepCopyInto(&out.ForwardPlugin) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Server. 
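`ProxyConfig.DeepCopyInto` above re-allocates every `ProxyArgumentList` value in the map rather than copying the map header. A self-contained sketch of the same idea, using a stand-in type and an arbitrary example key:

```go
package main

import "fmt"

type proxyArgumentList []string

func main() {
	in := map[string]proxyArgumentList{"metrics-bind-address": {"0.0.0.0:9101"}}

	// Re-allocate each slice value, mirroring the generated loop above;
	// copying only the map header would alias every argument list.
	out := make(map[string]proxyArgumentList, len(in))
	for k, v := range in {
		if v == nil {
			out[k] = nil
			continue
		}
		cp := make(proxyArgumentList, len(v))
		copy(cp, v)
		out[k] = cp
	}

	out["metrics-bind-address"][0] = "127.0.0.1:9101"
	fmt.Println(in["metrics-bind-address"][0]) // unchanged: "0.0.0.0:9101"
}
```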
+func (in *Server) DeepCopy() *Server { + if in == nil { + return nil + } + out := new(Server) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ServiceCA) DeepCopyInto(out *ServiceCA) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ServiceCA. +func (in *ServiceCA) DeepCopy() *ServiceCA { + if in == nil { + return nil + } + out := new(ServiceCA) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *ServiceCA) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ServiceCAList) DeepCopyInto(out *ServiceCAList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]ServiceCA, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ServiceCAList. +func (in *ServiceCAList) DeepCopy() *ServiceCAList { + if in == nil { + return nil + } + out := new(ServiceCAList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *ServiceCAList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ServiceCASpec) DeepCopyInto(out *ServiceCASpec) { + *out = *in + in.OperatorSpec.DeepCopyInto(&out.OperatorSpec) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ServiceCASpec. +func (in *ServiceCASpec) DeepCopy() *ServiceCASpec { + if in == nil { + return nil + } + out := new(ServiceCASpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ServiceCAStatus) DeepCopyInto(out *ServiceCAStatus) { + *out = *in + in.OperatorStatus.DeepCopyInto(&out.OperatorStatus) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ServiceCAStatus. +func (in *ServiceCAStatus) DeepCopy() *ServiceCAStatus { + if in == nil { + return nil + } + out := new(ServiceCAStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ServiceCatalogAPIServer) DeepCopyInto(out *ServiceCatalogAPIServer) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ServiceCatalogAPIServer. 
+func (in *ServiceCatalogAPIServer) DeepCopy() *ServiceCatalogAPIServer { + if in == nil { + return nil + } + out := new(ServiceCatalogAPIServer) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *ServiceCatalogAPIServer) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ServiceCatalogAPIServerList) DeepCopyInto(out *ServiceCatalogAPIServerList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]ServiceCatalogAPIServer, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ServiceCatalogAPIServerList. +func (in *ServiceCatalogAPIServerList) DeepCopy() *ServiceCatalogAPIServerList { + if in == nil { + return nil + } + out := new(ServiceCatalogAPIServerList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *ServiceCatalogAPIServerList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ServiceCatalogAPIServerSpec) DeepCopyInto(out *ServiceCatalogAPIServerSpec) { + *out = *in + in.OperatorSpec.DeepCopyInto(&out.OperatorSpec) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ServiceCatalogAPIServerSpec. +func (in *ServiceCatalogAPIServerSpec) DeepCopy() *ServiceCatalogAPIServerSpec { + if in == nil { + return nil + } + out := new(ServiceCatalogAPIServerSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ServiceCatalogAPIServerStatus) DeepCopyInto(out *ServiceCatalogAPIServerStatus) { + *out = *in + in.OperatorStatus.DeepCopyInto(&out.OperatorStatus) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ServiceCatalogAPIServerStatus. +func (in *ServiceCatalogAPIServerStatus) DeepCopy() *ServiceCatalogAPIServerStatus { + if in == nil { + return nil + } + out := new(ServiceCatalogAPIServerStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ServiceCatalogControllerManager) DeepCopyInto(out *ServiceCatalogControllerManager) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ServiceCatalogControllerManager. +func (in *ServiceCatalogControllerManager) DeepCopy() *ServiceCatalogControllerManager { + if in == nil { + return nil + } + out := new(ServiceCatalogControllerManager) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. 
+func (in *ServiceCatalogControllerManager) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ServiceCatalogControllerManagerList) DeepCopyInto(out *ServiceCatalogControllerManagerList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]ServiceCatalogControllerManager, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ServiceCatalogControllerManagerList. +func (in *ServiceCatalogControllerManagerList) DeepCopy() *ServiceCatalogControllerManagerList { + if in == nil { + return nil + } + out := new(ServiceCatalogControllerManagerList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *ServiceCatalogControllerManagerList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ServiceCatalogControllerManagerSpec) DeepCopyInto(out *ServiceCatalogControllerManagerSpec) { + *out = *in + in.OperatorSpec.DeepCopyInto(&out.OperatorSpec) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ServiceCatalogControllerManagerSpec. +func (in *ServiceCatalogControllerManagerSpec) DeepCopy() *ServiceCatalogControllerManagerSpec { + if in == nil { + return nil + } + out := new(ServiceCatalogControllerManagerSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ServiceCatalogControllerManagerStatus) DeepCopyInto(out *ServiceCatalogControllerManagerStatus) { + *out = *in + in.OperatorStatus.DeepCopyInto(&out.OperatorStatus) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ServiceCatalogControllerManagerStatus. +func (in *ServiceCatalogControllerManagerStatus) DeepCopy() *ServiceCatalogControllerManagerStatus { + if in == nil { + return nil + } + out := new(ServiceCatalogControllerManagerStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SimpleMacvlanConfig) DeepCopyInto(out *SimpleMacvlanConfig) { + *out = *in + if in.IPAMConfig != nil { + in, out := &in.IPAMConfig, &out.IPAMConfig + *out = new(IPAMConfig) + (*in).DeepCopyInto(*out) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SimpleMacvlanConfig. +func (in *SimpleMacvlanConfig) DeepCopy() *SimpleMacvlanConfig { + if in == nil { + return nil + } + out := new(SimpleMacvlanConfig) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *StaticIPAMAddresses) DeepCopyInto(out *StaticIPAMAddresses) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new StaticIPAMAddresses. 
+func (in *StaticIPAMAddresses) DeepCopy() *StaticIPAMAddresses { + if in == nil { + return nil + } + out := new(StaticIPAMAddresses) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *StaticIPAMConfig) DeepCopyInto(out *StaticIPAMConfig) { + *out = *in + if in.Addresses != nil { + in, out := &in.Addresses, &out.Addresses + *out = make([]StaticIPAMAddresses, len(*in)) + copy(*out, *in) + } + if in.Routes != nil { + in, out := &in.Routes, &out.Routes + *out = make([]StaticIPAMRoutes, len(*in)) + copy(*out, *in) + } + if in.DNS != nil { + in, out := &in.DNS, &out.DNS + *out = new(StaticIPAMDNS) + (*in).DeepCopyInto(*out) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new StaticIPAMConfig. +func (in *StaticIPAMConfig) DeepCopy() *StaticIPAMConfig { + if in == nil { + return nil + } + out := new(StaticIPAMConfig) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *StaticIPAMDNS) DeepCopyInto(out *StaticIPAMDNS) { + *out = *in + if in.Nameservers != nil { + in, out := &in.Nameservers, &out.Nameservers + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.Search != nil { + in, out := &in.Search, &out.Search + *out = make([]string, len(*in)) + copy(*out, *in) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new StaticIPAMDNS. +func (in *StaticIPAMDNS) DeepCopy() *StaticIPAMDNS { + if in == nil { + return nil + } + out := new(StaticIPAMDNS) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *StaticIPAMRoutes) DeepCopyInto(out *StaticIPAMRoutes) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new StaticIPAMRoutes. +func (in *StaticIPAMRoutes) DeepCopy() *StaticIPAMRoutes { + if in == nil { + return nil + } + out := new(StaticIPAMRoutes) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *StaticPodOperatorSpec) DeepCopyInto(out *StaticPodOperatorSpec) { + *out = *in + in.OperatorSpec.DeepCopyInto(&out.OperatorSpec) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new StaticPodOperatorSpec. +func (in *StaticPodOperatorSpec) DeepCopy() *StaticPodOperatorSpec { + if in == nil { + return nil + } + out := new(StaticPodOperatorSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *StaticPodOperatorStatus) DeepCopyInto(out *StaticPodOperatorStatus) { + *out = *in + in.OperatorStatus.DeepCopyInto(&out.OperatorStatus) + if in.NodeStatuses != nil { + in, out := &in.NodeStatuses, &out.NodeStatuses + *out = make([]NodeStatus, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new StaticPodOperatorStatus. 
+func (in *StaticPodOperatorStatus) DeepCopy() *StaticPodOperatorStatus { + if in == nil { + return nil + } + out := new(StaticPodOperatorStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *StatuspageProvider) DeepCopyInto(out *StatuspageProvider) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new StatuspageProvider. +func (in *StatuspageProvider) DeepCopy() *StatuspageProvider { + if in == nil { + return nil + } + out := new(StatuspageProvider) + in.DeepCopyInto(out) + return out +} diff --git a/vendor/github.com/openshift/api/operator/v1/zz_generated.swagger_doc_generated.go b/vendor/github.com/openshift/api/operator/v1/zz_generated.swagger_doc_generated.go new file mode 100644 index 0000000000000000000000000000000000000000..d20358c23be6fe49b6abe7276246d9723973b15d --- /dev/null +++ b/vendor/github.com/openshift/api/operator/v1/zz_generated.swagger_doc_generated.go @@ -0,0 +1,774 @@ +package v1 + +// This file contains a collection of methods that can be used from go-restful to +// generate Swagger API documentation for its models. Please read this PR for more +// information on the implementation: https://github.com/emicklei/go-restful/pull/215 +// +// TODOs are ignored from the parser (e.g. TODO(andronat):... || TODO:...) if and only if +// they are on one line! For multiple line or blocks that you want to ignore use ---. +// Any context after a --- is ignored. +// +// Those methods can be generated by using hack/update-swagger-docs.sh + +// AUTO-GENERATED FUNCTIONS START HERE +var map_GenerationStatus = map[string]string{ + "": "GenerationStatus keeps track of the generation for a given resource so that decisions about forced updates can be made.", + "group": "group is the group of the thing you're tracking", + "resource": "resource is the resource type of the thing you're tracking", + "namespace": "namespace is where the thing you're tracking is", + "name": "name is the name of the thing you're tracking", + "lastGeneration": "lastGeneration is the last generation of the workload controller involved", + "hash": "hash is an optional field set for resources without generation that are content sensitive like secrets and configmaps", +} + +func (GenerationStatus) SwaggerDoc() map[string]string { + return map_GenerationStatus +} + +var map_MyOperatorResource = map[string]string{ + "": "MyOperatorResource is an example operator configuration type", +} + +func (MyOperatorResource) SwaggerDoc() map[string]string { + return map_MyOperatorResource +} + +var map_NodeStatus = map[string]string{ + "": "NodeStatus provides information about the current state of a particular node managed by this operator.", + "nodeName": "nodeName is the name of the node", + "currentRevision": "currentRevision is the generation of the most recently successful deployment", + "targetRevision": "targetRevision is the generation of the deployment we're trying to apply", + "lastFailedRevision": "lastFailedRevision is the generation of the deployment we tried and failed to deploy.", + "lastFailedRevisionErrors": "lastFailedRevisionErrors is a list of the errors during the failed deployment referenced in lastFailedRevision", +} + +func (NodeStatus) SwaggerDoc() map[string]string { + return map_NodeStatus +} + +var map_OperatorCondition = map[string]string{ + "": "OperatorCondition is just the standard condition fields.", +} + +func 
(OperatorCondition) SwaggerDoc() map[string]string { + return map_OperatorCondition +} + +var map_OperatorSpec = map[string]string{ + "": "OperatorSpec contains common fields operators need. It is intended to be included anonymously inside the Spec struct for your particular operator.", + "managementState": "managementState indicates whether and how the operator should manage the component", + "logLevel": "logLevel is intent-based logging for an overall component. It does not give fine-grained control, but it is a simple way to manage coarse-grained logging choices that operators have to interpret for their operands.", + "operatorLogLevel": "operatorLogLevel is intent-based logging for the operator itself. It does not give fine-grained control, but it is a simple way to manage coarse-grained logging choices that operators have to interpret for themselves.", + "unsupportedConfigOverrides": "unsupportedConfigOverrides holds a sparse config that will override any previously set options. It only needs to be the fields to override; it will end up overlaying in the following order: 1. hardcoded defaults 2. observedConfig 3. unsupportedConfigOverrides", + "observedConfig": "observedConfig holds a sparse config that the controller has observed from the cluster state. It exists in spec because it is an input to the level for the operator", +} + +func (OperatorSpec) SwaggerDoc() map[string]string { + return map_OperatorSpec +} + +var map_OperatorStatus = map[string]string{ + "observedGeneration": "observedGeneration is the last generation change you've dealt with", + "conditions": "conditions is a list of conditions and their status", + "version": "version is the level this availability applies to", + "readyReplicas": "readyReplicas indicates how many replicas are ready and at the desired state", + "generations": "generations are used to determine when an item needs to be reconciled or has changed in a way that needs a reaction.", +} + +func (OperatorStatus) SwaggerDoc() map[string]string { + return map_OperatorStatus +} + +var map_StaticPodOperatorSpec = map[string]string{ + "": "StaticPodOperatorSpec is spec for controllers that manage static pods.", + "forceRedeploymentReason": "forceRedeploymentReason can be used to force the redeployment of the operand by providing a unique string. This provides a mechanism to kick a previously failed deployment and provide a reason why you think it will work this time instead of failing again on the same config.", + "failedRevisionLimit": "failedRevisionLimit is the number of failed static pod installer revisions to keep on disk and in the api: -1 = unlimited, 0 or unset = 5 (default)", + "succeededRevisionLimit": "succeededRevisionLimit is the number of successful static pod installer revisions to keep on disk and in the api: -1 = unlimited, 0 or unset = 5 (default)", +} + +func (StaticPodOperatorSpec) SwaggerDoc() map[string]string { + return map_StaticPodOperatorSpec +} + +var map_StaticPodOperatorStatus = map[string]string{ + "": "StaticPodOperatorStatus is status for controllers that manage static pods.
There are different needs because individual node status must be tracked.", + "latestAvailableRevision": "latestAvailableRevision is the deploymentID of the most recent deployment", + "latestAvailableRevisionReason": "latestAvailableRevisionReason describes the detailed reason for the most recent deployment", + "nodeStatuses": "nodeStatuses track the deployment values and errors across individual nodes", +} + +func (StaticPodOperatorStatus) SwaggerDoc() map[string]string { + return map_StaticPodOperatorStatus +} + +var map_Authentication = map[string]string{ + "": "Authentication provides information to configure an operator to manage authentication.", +} + +func (Authentication) SwaggerDoc() map[string]string { + return map_Authentication +} + +var map_AuthenticationList = map[string]string{ + "": "AuthenticationList is a collection of items", +} + +func (AuthenticationList) SwaggerDoc() map[string]string { + return map_AuthenticationList +} + +var map_AuthenticationStatus = map[string]string{ + "managingOAuthAPIServer": "ManagingOAuthAPIServer indicates whether this operator is managing OAuth-related APIs. Setting this field to true will cause OAS-O to step down. Note that this field will be removed in future releases, once https://github.com/openshift/enhancements/blob/master/enhancements/authentication/separate-oauth-resources.md is fully implemented", +} + +func (AuthenticationStatus) SwaggerDoc() map[string]string { + return map_AuthenticationStatus +} + +var map_Console = map[string]string{ + "": "Console provides a means to configure an operator to manage the console.", +} + +func (Console) SwaggerDoc() map[string]string { + return map_Console +} + +var map_ConsoleCustomization = map[string]string{ + "": "ConsoleCustomization defines optional configuration for the console UI.", + "brand": "brand is the default branding of the web console, which can be overridden by providing the brand field. There is a limited set of specific brand options. This field controls elements of the console such as the logo. An invalid value will prevent a console rollout.", + "documentationBaseURL": "documentationBaseURL links to external documentation that is shown in various sections of the web console. Providing documentationBaseURL will override the default documentation URL. An invalid value will prevent a console rollout.", + "customProductName": "customProductName is the name that will be displayed in page titles, logo alt text, and the about dialog instead of the normal OpenShift product name.", + "customLogoFile": "customLogoFile replaces the default OpenShift logo in the masthead and about dialog. It is a reference to a ConfigMap in the openshift-config namespace. This can be created with a command like 'oc create configmap custom-logo --from-file=/path/to/file -n openshift-config'. Image size must be less than 1 MB due to constraints on the ConfigMap size. The ConfigMap key should include a file extension so that the console serves the file with the correct MIME type.
Recommended logo specifications: a max height of 68px and a max width of 200px; SVG format is preferred", +} + +func (ConsoleCustomization) SwaggerDoc() map[string]string { + return map_ConsoleCustomization +} + +var map_ConsoleProviders = map[string]string{ + "": "ConsoleProviders defines a list of optional additional providers of functionality to the console.", + "statuspage": "statuspage contains the ID of the statuspage.io page that provides status info.", +} + +func (ConsoleProviders) SwaggerDoc() map[string]string { + return map_ConsoleProviders +} + +var map_ConsoleSpec = map[string]string{ + "": "ConsoleSpec is the specification of the desired behavior of the Console.", + "customization": "customization is used to optionally provide a small set of customization options to the web console.", + "providers": "providers contains configuration for using specific service providers.", +} + +func (ConsoleSpec) SwaggerDoc() map[string]string { + return map_ConsoleSpec +} + +var map_ConsoleStatus = map[string]string{ + "": "ConsoleStatus defines the observed status of the Console.", +} + +func (ConsoleStatus) SwaggerDoc() map[string]string { + return map_ConsoleStatus +} + +var map_StatuspageProvider = map[string]string{ + "": "StatuspageProvider provides identity for a statuspage account.", + "pageID": "pageID is the unique ID assigned by Statuspage for your page. This must be a public page.", +} + +func (StatuspageProvider) SwaggerDoc() map[string]string { + return map_StatuspageProvider +} + +var map_CSISnapshotController = map[string]string{ + "": "CSISnapshotController provides a means to configure an operator to manage the CSI snapshots. `cluster` is the canonical name.", + "spec": "spec holds user-settable values for configuration", + "status": "status holds observed values from the cluster.
They may not be overridden.", +} + +func (CSISnapshotController) SwaggerDoc() map[string]string { + return map_CSISnapshotController +} + +var map_CSISnapshotControllerList = map[string]string{ + "": "CSISnapshotControllerList contains a list of CSISnapshotControllers.", +} + +func (CSISnapshotControllerList) SwaggerDoc() map[string]string { + return map_CSISnapshotControllerList +} + +var map_CSISnapshotControllerSpec = map[string]string{ + "": "CSISnapshotControllerSpec is the specification of the desired behavior of the CSISnapshotController operator.", +} + +func (CSISnapshotControllerSpec) SwaggerDoc() map[string]string { + return map_CSISnapshotControllerSpec +} + +var map_CSISnapshotControllerStatus = map[string]string{ + "": "CSISnapshotControllerStatus defines the observed status of the CSISnapshotController operator.", +} + +func (CSISnapshotControllerStatus) SwaggerDoc() map[string]string { + return map_CSISnapshotControllerStatus +} + +var map_DNS = map[string]string{ + "": "DNS manages the CoreDNS component to provide a name resolution service for pods and services in the cluster.\n\nThis supports the DNS-based service discovery specification: https://github.com/kubernetes/dns/blob/master/docs/specification.md\n\nMore details: https://kubernetes.io/docs/tasks/administer-cluster/coredns", + "spec": "spec is the specification of the desired behavior of the DNS.", + "status": "status is the most recently observed status of the DNS.", +} + +func (DNS) SwaggerDoc() map[string]string { + return map_DNS +} + +var map_DNSList = map[string]string{ + "": "DNSList contains a list of DNS", +} + +func (DNSList) SwaggerDoc() map[string]string { + return map_DNSList +} + +var map_DNSSpec = map[string]string{ + "": "DNSSpec is the specification of the desired behavior of the DNS.", + "servers": "servers is a list of DNS resolvers that provide name query delegation for one or more subdomains outside the scope of the cluster domain. If servers consists of more than one Server, longest suffix match will be used to determine the Server.\n\nFor example, if there are two Servers, one for \"foo.com\" and another for \"a.foo.com\", and the name query is for \"www.a.foo.com\", it will be routed to the Server with Zone \"a.foo.com\".\n\nIf this field is nil, no servers are created.", +} + +func (DNSSpec) SwaggerDoc() map[string]string { + return map_DNSSpec +} + +var map_DNSStatus = map[string]string{ + "": "DNSStatus defines the observed status of the DNS.", + "clusterIP": "clusterIP is the service IP through which this DNS is made available.\n\nIn the case of the default DNS, this will be a well known IP that is used as the default nameserver for pods that are using the default ClusterFirst DNS policy.\n\nIn general, this IP can be specified in a pod's spec.dnsConfig.nameservers list or used explicitly when performing name resolution from within the cluster. Example: dig foo.com @<service IP>\n\nMore info: https://kubernetes.io/docs/concepts/services-networking/service/#virtual-ips-and-service-proxies", + "clusterDomain": "clusterDomain is the local cluster DNS domain suffix for DNS services. 
This will be a subdomain as defined in RFC 1034, section 3.5: https://tools.ietf.org/html/rfc1034#section-3.5 Example: \"cluster.local\"\n\nMore info: https://kubernetes.io/docs/concepts/services-networking/dns-pod-service", + "conditions": "conditions provide information about the state of the DNS on the cluster.\n\nThese are the supported DNS conditions:\n\n * Available\n - True if the following conditions are met:\n * DNS controller daemonset is available.\n - False if any of those conditions are unsatisfied.", +} + +func (DNSStatus) SwaggerDoc() map[string]string { + return map_DNSStatus +} + +var map_ForwardPlugin = map[string]string{ + "": "ForwardPlugin defines a schema for configuring the CoreDNS forward plugin.", + "upstreams": "upstreams is a list of resolvers to forward name queries for subdomains of Zones. Upstreams are randomized when more than 1 upstream is specified. Each instance of CoreDNS performs health checking of Upstreams. When a healthy upstream returns an error during the exchange, another resolver is tried from Upstreams. Each upstream is represented by an IP address or IP:port if the upstream listens on a port other than 53.\n\nA maximum of 15 upstreams is allowed per ForwardPlugin.", +} + +func (ForwardPlugin) SwaggerDoc() map[string]string { + return map_ForwardPlugin +} + +var map_Server = map[string]string{ + "": "Server defines the schema for a server that runs per instance of CoreDNS.", + "name": "name is required and specifies a unique name for the server. Name must comply with the Service Name Syntax of rfc6335.", + "zones": "zones is required and specifies the subdomains that Server is authoritative for. Zones must conform to the rfc1123 definition of a subdomain. Specifying the cluster domain (i.e., \"cluster.local\") is invalid.", + "forwardPlugin": "forwardPlugin defines a schema for configuring CoreDNS to proxy DNS messages to upstream resolvers.", +} + +func (Server) SwaggerDoc() map[string]string { + return map_Server +}
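The Server and ForwardPlugin maps above describe longest-suffix zone matching: a query for www.a.foo.com is delegated to the server whose zone is a.foo.com rather than foo.com. A minimal sketch of such a DNSSpec, assuming these types live in the vendored github.com/openshift/api/operator/v1 package (zone names and upstream addresses are placeholders):

package main

import (
	"fmt"

	operatorv1 "github.com/openshift/api/operator/v1" // assumed import path
)

func main() {
	// Queries for www.a.foo.com match the longer zone "a.foo.com" and are
	// forwarded to 5.6.7.8:5353; everything else under foo.com goes to 1.2.3.4.
	spec := operatorv1.DNSSpec{
		Servers: []operatorv1.Server{
			{
				Name:          "foo",
				Zones:         []string{"foo.com"},
				ForwardPlugin: operatorv1.ForwardPlugin{Upstreams: []string{"1.2.3.4"}},
			},
			{
				Name:          "a-foo",
				Zones:         []string{"a.foo.com"},
				ForwardPlugin: operatorv1.ForwardPlugin{Upstreams: []string{"5.6.7.8:5353"}},
			},
		},
	}
	fmt.Printf("%d delegating servers configured\n", len(spec.Servers))
}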
+ +var map_Etcd = map[string]string{ + "": "Etcd provides information to configure an operator to manage etcd.", +} + +func (Etcd) SwaggerDoc() map[string]string { + return map_Etcd +} + +var map_EtcdList = map[string]string{ + "": "EtcdList is a collection of items", + "items": "Items contains the items", +} + +func (EtcdList) SwaggerDoc() map[string]string { + return map_EtcdList +} + +var map_EndpointPublishingStrategy = map[string]string{ + "": "EndpointPublishingStrategy is a way to publish the endpoints of an IngressController, and represents the type and any additional configuration for a specific type.", + "type": "type is the publishing strategy to use. Valid values are:\n\n* LoadBalancerService\n\nPublishes the ingress controller using a Kubernetes LoadBalancer Service.\n\nIn this configuration, the ingress controller deployment uses container networking. A LoadBalancer Service is created to publish the deployment.\n\nSee: https://kubernetes.io/docs/concepts/services-networking/service/#loadbalancer\n\nIf domain is set, a wildcard DNS record will be managed to point at the LoadBalancer Service's external name. DNS records are managed only in DNS zones defined by dns.config.openshift.io/cluster .spec.publicZone and .spec.privateZone.\n\nWildcard DNS management is currently supported only on the AWS, Azure, and GCP platforms.\n\n* HostNetwork\n\nPublishes the ingress controller on node ports where the ingress controller is deployed.\n\nIn this configuration, the ingress controller deployment uses host networking, bound to node ports 80 and 443. The user is responsible for configuring an external load balancer to publish the ingress controller via the node ports.\n\n* Private\n\nDoes not publish the ingress controller.\n\nIn this configuration, the ingress controller deployment uses container networking, and is not explicitly published. The user must manually publish the ingress controller.\n\n* NodePortService\n\nPublishes the ingress controller using a Kubernetes NodePort Service.\n\nIn this configuration, the ingress controller deployment uses container networking. A NodePort Service is created to publish the deployment. The specific node ports are dynamically allocated by OpenShift; however, to support static port allocations, user changes to the node port field of the managed NodePort Service will be preserved.", + "loadBalancer": "loadBalancer holds parameters for the load balancer. Present only if type is LoadBalancerService.", + "hostNetwork": "hostNetwork holds parameters for the HostNetwork endpoint publishing strategy. Present only if type is HostNetwork.", + "private": "private holds parameters for the Private endpoint publishing strategy. Present only if type is Private.", + "nodePort": "nodePort holds parameters for the NodePortService endpoint publishing strategy. Present only if type is NodePortService.", +} + +func (EndpointPublishingStrategy) SwaggerDoc() map[string]string { + return map_EndpointPublishingStrategy +}
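EndpointPublishingStrategy is effectively a discriminated union: type selects the strategy, and only the matching parameter field (loadBalancer, hostNetwork, private, or nodePort) is consulted. A sketch, assuming the strategy types and constants in the vendored github.com/openshift/api/operator/v1 package:

package main

import (
	"fmt"

	operatorv1 "github.com/openshift/api/operator/v1" // assumed import path
)

func main() {
	// Type selects the strategy; only the LoadBalancer parameters are
	// honored here, and the other parameter fields stay nil.
	strategy := operatorv1.EndpointPublishingStrategy{
		Type: operatorv1.LoadBalancerServiceStrategyType,
		LoadBalancer: &operatorv1.LoadBalancerStrategy{
			Scope: operatorv1.ExternalLoadBalancer,
		},
	}
	fmt.Println("publishing strategy:", strategy.Type)
}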
+ +var map_HostNetworkStrategy = map[string]string{ + "": "HostNetworkStrategy holds parameters for the HostNetwork endpoint publishing strategy.", +} + +func (HostNetworkStrategy) SwaggerDoc() map[string]string { + return map_HostNetworkStrategy +} + +var map_IngressController = map[string]string{ + "": "IngressController describes a managed ingress controller for the cluster. The controller can service OpenShift Route and Kubernetes Ingress resources.\n\nWhen an IngressController is created, a new ingress controller deployment is created to allow external traffic to reach the services that expose Ingress or Route resources. Updating this resource may lead to disruption for public facing network connections as a new ingress controller revision may be rolled out.\n\nhttps://kubernetes.io/docs/concepts/services-networking/ingress-controllers\n\nWhenever possible, sensible defaults for the platform are used. See each field for more details.", + "spec": "spec is the specification of the desired behavior of the IngressController.", + "status": "status is the most recently observed status of the IngressController.", +} + +func (IngressController) SwaggerDoc() map[string]string { + return map_IngressController +} + +var map_IngressControllerList = map[string]string{ + "": "IngressControllerList contains a list of IngressControllers.", +} + +func (IngressControllerList) SwaggerDoc() map[string]string { + return map_IngressControllerList +} + +var map_IngressControllerSpec = map[string]string{ + "": "IngressControllerSpec is the specification of the desired behavior of the IngressController.", + "domain": "domain is a DNS name serviced by the ingress controller and is used to configure multiple features:\n\n* For the LoadBalancerService endpoint publishing strategy, domain is\n used to configure DNS records. See endpointPublishingStrategy.\n\n* When using a generated default certificate, the certificate will be valid\n for domain and its subdomains. See defaultCertificate.\n\n* The value is published to individual Route statuses so that end-users\n know where to target external DNS records.\n\ndomain must be unique among all IngressControllers, and cannot be updated.\n\nIf empty, defaults to ingress.config.openshift.io/cluster .spec.domain.", + "replicas": "replicas is the desired number of ingress controller replicas. If unset, defaults to 2.", + "endpointPublishingStrategy": "endpointPublishingStrategy is used to publish the ingress controller endpoints to other networks, enable load balancer integrations, etc.\n\nIf unset, the default is based on infrastructure.config.openshift.io/cluster .status.platform:\n\n AWS: LoadBalancerService (with External scope)\n Azure: LoadBalancerService (with External scope)\n GCP: LoadBalancerService (with External scope)\n IBMCloud: LoadBalancerService (with External scope)\n Libvirt: HostNetwork\n\nAny other platform types (including None) default to HostNetwork.\n\nendpointPublishingStrategy cannot be updated.", + "defaultCertificate": "defaultCertificate is a reference to a secret containing the default certificate served by the ingress controller. When Routes don't specify their own certificate, defaultCertificate is used.\n\nThe secret must contain the following keys and data:\n\n tls.crt: certificate file contents\n tls.key: key file contents\n\nIf unset, a wildcard certificate is automatically generated and used. The certificate is valid for the ingress controller domain (and subdomains) and the generated certificate's CA will be automatically integrated with the cluster's trust store.\n\nThe in-use certificate (whether generated or user-specified) will be automatically integrated with OpenShift's built-in OAuth server.", + "namespaceSelector": "namespaceSelector is used to filter the set of namespaces serviced by the ingress controller. This is useful for implementing shards.\n\nIf unset, the default is no filtering.", + "routeSelector": "routeSelector is used to filter the set of Routes serviced by the ingress controller. This is useful for implementing shards.\n\nIf unset, the default is no filtering.", + "nodePlacement": "nodePlacement enables explicit control over the scheduling of the ingress controller.\n\nIf unset, defaults are used. 
See NodePlacement for more details.", + "tlsSecurityProfile": "tlsSecurityProfile specifies settings for TLS connections for ingresscontrollers.\n\nIf unset, the default is based on the apiservers.config.openshift.io/cluster resource.\n\nNote that when using the Old, Intermediate, and Modern profile types, the effective profile configuration is subject to change between releases. For example, given a specification to use the Intermediate profile deployed on release X.Y.Z, an upgrade to release X.Y.Z+1 may cause a new profile configuration to be applied to the ingress controller, resulting in a rollout.\n\nNote that the minimum TLS version for ingress controllers is 1.1, and the maximum TLS version is 1.2. An implication of this restriction is that the Modern TLS profile type cannot be used because it requires TLS 1.3.", + "routeAdmission": "routeAdmission defines a policy for handling new route claims (for example, to allow or deny claims across namespaces).\n\nIf empty, defaults will be applied. See specific routeAdmission fields for details about their defaults.", +} + +func (IngressControllerSpec) SwaggerDoc() map[string]string { + return map_IngressControllerSpec +} + +var map_IngressControllerStatus = map[string]string{ + "": "IngressControllerStatus defines the observed status of the IngressController.", + "availableReplicas": "availableReplicas is the number of observed available replicas according to the ingress controller deployment.", + "selector": "selector is a label selector, in string format, for ingress controller pods corresponding to the IngressController. The number of matching pods should equal the value of availableReplicas.", + "domain": "domain is the actual domain in use.", + "endpointPublishingStrategy": "endpointPublishingStrategy is the actual strategy in use.", + "conditions": "conditions is a list of conditions and their status.\n\nAvailable means the ingress controller deployment is available and servicing route and ingress resources (i.e., .status.availableReplicas equals .spec.replicas)\n\nThere are additional conditions which indicate the status of other ingress controller features and capabilities.\n\n * LoadBalancerManaged\n - True if the following conditions are met:\n * The endpoint publishing strategy requires a service load balancer.\n - False if any of those conditions are unsatisfied.\n\n * LoadBalancerReady\n - True if the following conditions are met:\n * A load balancer is managed.\n * The load balancer is ready.\n - False if any of those conditions are unsatisfied.\n\n * DNSManaged\n - True if the following conditions are met:\n * The endpoint publishing strategy and platform support DNS.\n * The ingress controller domain is set.\n * dns.config.openshift.io/cluster configures DNS zones.\n - False if any of those conditions are unsatisfied.\n\n * DNSReady\n - True if the following conditions are met:\n * DNS is managed.\n * DNS records have been successfully created.\n - False if any of those conditions are unsatisfied.", + "tlsProfile": "tlsProfile is the TLS connection configuration that is in effect.", + "observedGeneration": "observedGeneration is the most recent generation observed.", +} + +func (IngressControllerStatus) SwaggerDoc() map[string]string { + return map_IngressControllerStatus +} + +var map_LoadBalancerStrategy = map[string]string{ + "": "LoadBalancerStrategy holds parameters for a load balancer.", + "scope": "scope indicates the scope at which the load balancer is exposed. 
Possible values are \"External\" and \"Internal\".", +} + +func (LoadBalancerStrategy) SwaggerDoc() map[string]string { + return map_LoadBalancerStrategy +} + +var map_NodePlacement = map[string]string{ + "": "NodePlacement describes node scheduling configuration for an ingress controller.", + "nodeSelector": "nodeSelector is the node selector applied to ingress controller deployments.\n\nIf unset, the default is:\n\n beta.kubernetes.io/os: linux\n node-role.kubernetes.io/worker: ''\n\nIf set, the specified selector is used and replaces the default.", + "tolerations": "tolerations is a list of tolerations applied to ingress controller deployments.\n\nThe default is an empty list.\n\nSee https://kubernetes.io/docs/concepts/configuration/taint-and-toleration/", +} + +func (NodePlacement) SwaggerDoc() map[string]string { + return map_NodePlacement +} + +var map_NodePortStrategy = map[string]string{ + "": "NodePortStrategy holds parameters for the NodePortService endpoint publishing strategy.", +} + +func (NodePortStrategy) SwaggerDoc() map[string]string { + return map_NodePortStrategy +} + +var map_PrivateStrategy = map[string]string{ + "": "PrivateStrategy holds parameters for the Private endpoint publishing strategy.", +} + +func (PrivateStrategy) SwaggerDoc() map[string]string { + return map_PrivateStrategy +} + +var map_RouteAdmissionPolicy = map[string]string{ + "": "RouteAdmissionPolicy is an admission policy for allowing new route claims.", + "namespaceOwnership": "namespaceOwnership describes how host name claims across namespaces should be handled.\n\nValue must be one of:\n\n- Strict: Do not allow routes in different namespaces to claim the same host.\n\n- InterNamespaceAllowed: Allow routes to claim different paths of the same\n host name across namespaces.\n\nIf empty, the default is Strict.", +} + +func (RouteAdmissionPolicy) SwaggerDoc() map[string]string { + return map_RouteAdmissionPolicy +} + +var map_KubeAPIServer = map[string]string{ + "": "KubeAPIServer provides information to configure an operator to manage kube-apiserver.", + "spec": "spec is the specification of the desired behavior of the Kubernetes API Server", + "status": "status is the most recently observed status of the Kubernetes API Server", +} + +func (KubeAPIServer) SwaggerDoc() map[string]string { + return map_KubeAPIServer +} + +var map_KubeAPIServerList = map[string]string{ + "": "KubeAPIServerList is a collection of items", + "items": "Items contains the items", +} + +func (KubeAPIServerList) SwaggerDoc() map[string]string { + return map_KubeAPIServerList +} + +var map_KubeControllerManager = map[string]string{ + "": "KubeControllerManager provides information to configure an operator to manage kube-controller-manager.", + "spec": "spec is the specification of the desired behavior of the Kubernetes Controller Manager", + "status": "status is the most recently observed status of the Kubernetes Controller Manager", +} + +func (KubeControllerManager) SwaggerDoc() map[string]string { + return map_KubeControllerManager +} + +var map_KubeControllerManagerList = map[string]string{ + "": "KubeControllerManagerList is a collection of items", + "items": "Items contains the items", +} + +func (KubeControllerManagerList) SwaggerDoc() map[string]string { + return map_KubeControllerManagerList +} + +var map_KubeStorageVersionMigrator = map[string]string{ + "": "KubeStorageVersionMigrator provides information to configure an operator to manage kube-storage-version-migrator.", +} + +func (KubeStorageVersionMigrator) 
SwaggerDoc() map[string]string { + return map_KubeStorageVersionMigrator +} + +var map_KubeStorageVersionMigratorList = map[string]string{ + "": "KubeStorageVersionMigratorList is a collection of items", + "items": "Items contains the items", +} + +func (KubeStorageVersionMigratorList) SwaggerDoc() map[string]string { + return map_KubeStorageVersionMigratorList +} + +var map_AdditionalNetworkDefinition = map[string]string{ + "": "AdditionalNetworkDefinition configures an extra network that is available but not created by default. Instead, pods must request them by name. type must be specified, along with exactly one \"Config\" that matches the type.", + "type": "type is the type of network. The supported values are NetworkTypeRaw, NetworkTypeSimpleMacvlan.", + "name": "name is the name of the network. This will be populated in the resulting CRD and must be unique.", + "namespace": "namespace is the namespace of the network. This will be populated in the resulting CRD. If not given, the network will be created in the default namespace.", + "rawCNIConfig": "rawCNIConfig is the raw CNI configuration JSON to create in the NetworkAttachmentDefinition CRD", + "simpleMacvlanConfig": "SimpleMacvlanConfig configures the macvlan interface in case of type:NetworkTypeSimpleMacvlan", +} + +func (AdditionalNetworkDefinition) SwaggerDoc() map[string]string { + return map_AdditionalNetworkDefinition +}
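An AdditionalNetworkDefinition pairs type with exactly one matching config; for NetworkTypeRaw the raw CNI JSON is passed through into the generated NetworkAttachmentDefinition. A sketch, assuming the vendored github.com/openshift/api/operator/v1 types (the network name and CNI config are placeholders):

package main

import (
	"fmt"

	operatorv1 "github.com/openshift/api/operator/v1" // assumed import path
)

func main() {
	// Pods opt into this network by name; the raw CNI JSON below is
	// written verbatim into the resulting NetworkAttachmentDefinition.
	extra := operatorv1.AdditionalNetworkDefinition{
		Type:         operatorv1.NetworkTypeRaw,
		Name:         "macvlan-net", // placeholder name
		RawCNIConfig: `{"cniVersion":"0.3.1","type":"macvlan","master":"eth0"}`,
	}
	fmt.Println(extra.Name, extra.Type)
}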
+ +var map_ClusterNetworkEntry = map[string]string{ + "": "ClusterNetworkEntry is a subnet from which to allocate PodIPs. A network of size HostPrefix (in CIDR notation) will be allocated when nodes join the cluster. Not all network providers support multiple ClusterNetworks", +} + +func (ClusterNetworkEntry) SwaggerDoc() map[string]string { + return map_ClusterNetworkEntry +} + +var map_DefaultNetworkDefinition = map[string]string{ + "": "DefaultNetworkDefinition represents a single network plugin's configuration. type must be specified, along with exactly one \"Config\" that matches the type.", + "type": "type is the type of network. All NetworkTypes are supported except for NetworkTypeRaw.", + "openshiftSDNConfig": "openShiftSDNConfig configures the openshift-sdn plugin", + "ovnKubernetesConfig": "ovnKubernetesConfig configures the ovn-kubernetes plugin. This is currently not implemented.", + "kuryrConfig": "KuryrConfig configures the kuryr plugin", +} + +func (DefaultNetworkDefinition) SwaggerDoc() map[string]string { + return map_DefaultNetworkDefinition +} + +var map_HybridOverlayConfig = map[string]string{ + "hybridClusterNetwork": "HybridClusterNetwork defines a network space given to nodes on an additional overlay network.", +} + +func (HybridOverlayConfig) SwaggerDoc() map[string]string { + return map_HybridOverlayConfig +} + +var map_IPAMConfig = map[string]string{ + "": "IPAMConfig contains configurations for IPAM (IP Address Management)", + "type": "Type is the type of the IPAM module that will be used for IP Address Management (IPAM). The supported values are IPAMTypeDHCP, IPAMTypeStatic", + "staticIPAMConfig": "StaticIPAMConfig configures the static IP address in case of type:IPAMTypeStatic", +} + +func (IPAMConfig) SwaggerDoc() map[string]string { + return map_IPAMConfig +} + +var map_KuryrConfig = map[string]string{ + "": "KuryrConfig configures the Kuryr-Kubernetes SDN", + "daemonProbesPort": "The port on which kuryr-daemon will listen for readiness and liveness requests.", + "controllerProbesPort": "The port on which kuryr-controller will listen for readiness and liveness requests.", + "openStackServiceNetwork": "openStackServiceNetwork contains the CIDR of the network from which to allocate IPs for OpenStack Octavia's Amphora VMs. Please note that with the Amphora driver, Octavia uses two IPs from that network for each load balancer - one given by OpenShift and a second for VRRP connections. As the first is managed by OpenShift's IPAM and the second by Neutron's, the two need to come from different pools. Therefore `openStackServiceNetwork` needs to be at least twice the size of `serviceNetwork`, and the whole `serviceNetwork` must overlap with `openStackServiceNetwork`. cluster-network-operator will then make sure VRRP IPs are taken from the ranges inside `openStackServiceNetwork` that do not overlap with `serviceNetwork`, effectively preventing conflicts. If not set, cluster-network-operator will use `serviceNetwork` expanded by decrementing the prefix size by 1.", + "enablePortPoolsPrepopulation": "enablePortPoolsPrepopulation, when true, will make Kuryr prepopulate each newly created port pool with a minimum number of ports. Kuryr uses Neutron port pooling because creating a port takes a significant amount of time. Instead of creating a port when a pod is deployed, Kuryr keeps a number of ports ready to be attached to pods. By default, port prepopulation is disabled.", + "poolMaxPorts": "poolMaxPorts sets a maximum number of free ports that are being kept in a port pool. If the number of ports exceeds this setting, free ports will get deleted. Setting 0 will disable this upper bound, effectively preventing pools from shrinking; this is the default value. For more information about port pools, see the enablePortPoolsPrepopulation setting.", + "poolMinPorts": "poolMinPorts sets a minimum number of free ports that should be kept in a port pool. If the number of ports is lower than this setting, new ports will get created and added to the pool. The default is 1. For more information about port pools, see the enablePortPoolsPrepopulation setting.", + "poolBatchPorts": "poolBatchPorts sets a number of ports that should be created in a single batch request to extend the port pool. The default is 3. For more information about port pools, see the enablePortPoolsPrepopulation setting.", +} + +func (KuryrConfig) SwaggerDoc() map[string]string { + return map_KuryrConfig +}
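The openStackServiceNetwork default described above, the serviceNetwork expanded by decrementing the prefix size by 1, doubles the range. A standard-library sketch of that computation (illustrative only; the real logic lives in cluster-network-operator):

package main

import (
	"fmt"
	"net"
)

// defaultOpenStackServiceNetwork returns the input CIDR with its prefix
// length reduced by one, i.e. a network twice the size. Sketch only.
func defaultOpenStackServiceNetwork(serviceNetwork string) (string, error) {
	ip, ipnet, err := net.ParseCIDR(serviceNetwork)
	if err != nil {
		return "", err
	}
	ones, bits := ipnet.Mask.Size()
	if ones == 0 {
		return "", fmt.Errorf("cannot expand %s any further", serviceNetwork)
	}
	mask := net.CIDRMask(ones-1, bits)
	expanded := net.IPNet{IP: ip.Mask(mask), Mask: mask}
	return expanded.String(), nil
}

func main() {
	out, _ := defaultOpenStackServiceNetwork("172.30.0.0/16")
	fmt.Println(out) // 172.30.0.0/15, twice the size of the service network
}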
+ +var map_Network = map[string]string{ + "": "Network describes the cluster's desired network configuration. It is consumed by the cluster-network-operator.", +} + +func (Network) SwaggerDoc() map[string]string { + return map_Network +} + +var map_NetworkList = map[string]string{ + "": "NetworkList contains a list of Network configurations", +} + +func (NetworkList) SwaggerDoc() map[string]string { + return map_NetworkList +} + +var map_NetworkSpec = map[string]string{ + "": "NetworkSpec is the top-level network configuration object.", + "clusterNetwork": "clusterNetwork is the IP address pool to use for pod IPs. Some network providers, e.g. OpenShift SDN, support multiple ClusterNetworks. Others only support one. This is equivalent to the cluster-cidr.", + "serviceNetwork": "serviceNetwork is the IP address pool to use for Service IPs. Currently, all existing network providers only support a single value here, but this is an array to allow for growth.", + "defaultNetwork": "defaultNetwork is the \"default\" network that all pods will receive", + "additionalNetworks": "additionalNetworks is a list of extra networks to make available to pods when multiple networks are enabled.", + "disableMultiNetwork": "disableMultiNetwork specifies whether or not multiple pod network support should be disabled. If unset, this property defaults to 'false' and multiple network support is enabled.", + "deployKubeProxy": "deployKubeProxy specifies whether or not a standalone kube-proxy should be deployed by the operator. Some network providers include kube-proxy or similar functionality. If unset, the plugin will attempt to select the correct value, which is false when OpenShift SDN and ovn-kubernetes are used and true otherwise.", + "kubeProxyConfig": "kubeProxyConfig lets us configure desired proxy configuration. If not specified, sensible defaults will be chosen by OpenShift directly. Not consumed by all network providers - currently only openshift-sdn.", + "logLevel": "logLevel allows configuring the logging level of the components deployed by the operator. Currently only Kuryr SDN is affected by this setting. Please note that turning on extensive logging may affect performance. The default value is \"Normal\".", +} + +func (NetworkSpec) SwaggerDoc() map[string]string { + return map_NetworkSpec +}
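Taken together, a minimal NetworkSpec names one pod CIDR (carved into per-node subnets of size HostPrefix), one service CIDR, and the default network plugin. A sketch, assuming the vendored github.com/openshift/api/operator/v1 types; the CIDRs are placeholders:

package main

import (
	"fmt"

	operatorv1 "github.com/openshift/api/operator/v1" // assumed import path
)

func main() {
	spec := operatorv1.NetworkSpec{
		// Pod IPs come from this pool; each node gets a /23 slice.
		ClusterNetwork: []operatorv1.ClusterNetworkEntry{
			{CIDR: "10.128.0.0/14", HostPrefix: 23},
		},
		// A single-element array today; an array to allow for growth.
		ServiceNetwork: []string{"172.30.0.0/16"},
		DefaultNetwork: operatorv1.DefaultNetworkDefinition{
			Type: operatorv1.NetworkTypeOpenShiftSDN,
		},
	}
	fmt.Printf("%+v\n", spec)
}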
+ +var map_NetworkStatus = map[string]string{ + "": "NetworkStatus is currently unused. Instead, status is reported in the Network.config.openshift.io object.", +} + +func (NetworkStatus) SwaggerDoc() map[string]string { + return map_NetworkStatus +} + +var map_OVNKubernetesConfig = map[string]string{ + "": "ovnKubernetesConfig contains the configuration parameters for networks using the ovn-kubernetes network project", + "mtu": "mtu is the MTU to use for the tunnel interface. This must be 100 bytes smaller than the uplink MTU. Default is 1400.", + "genevePort": "genevePort is the UDP port to be used by Geneve encapsulation. Default is 6081.", + "hybridOverlayConfig": "HybridOverlayConfig configures an additional overlay network for peers that are not using OVN.", +} + +func (OVNKubernetesConfig) SwaggerDoc() map[string]string { + return map_OVNKubernetesConfig +} + +var map_OpenShiftSDNConfig = map[string]string{ + "": "OpenShiftSDNConfig configures the three openshift-sdn plugins", + "mode": "mode is one of \"Multitenant\", \"Subnet\", or \"NetworkPolicy\"", + "vxlanPort": "vxlanPort is the port to use for all vxlan packets. The default is 4789.", + "mtu": "mtu is the MTU to use for the tunnel interface. Defaults to 1450 if unset. This must be 50 bytes smaller than the machine's uplink.", + "useExternalOpenvswitch": "useExternalOpenvswitch tells the operator not to install openvswitch, because it will be provided separately. If set, you must provide it yourself.", + "enableUnidling": "enableUnidling controls whether or not the service proxy will support idling and unidling of services. By default, unidling is enabled.", +} + +func (OpenShiftSDNConfig) SwaggerDoc() map[string]string { + return map_OpenShiftSDNConfig +} + +var map_ProxyConfig = map[string]string{ + "": "ProxyConfig defines the configuration knobs for kubeproxy. All of these are optional and have sensible defaults.", + "iptablesSyncPeriod": "The period that iptables rules are refreshed. Default: 30s", + "bindAddress": "The address to \"bind\" on. Defaults to 0.0.0.0.", + "proxyArguments": "Any additional arguments to pass to the kubeproxy process", +} + +func (ProxyConfig) SwaggerDoc() map[string]string { + return map_ProxyConfig +} + +var map_SimpleMacvlanConfig = map[string]string{ + "": "SimpleMacvlanConfig contains configurations for the macvlan interface.", + "master": "master is the host interface to create the macvlan interface from. If not specified, it will be the default route interface.", + "ipamConfig": "IPAMConfig configures the IPAM module to be used for IP Address Management (IPAM).", + "mode": "mode is the macvlan mode: bridge, private, vepa, passthru. The default is bridge.", + "mtu": "mtu is the MTU to use for the macvlan interface. If unset, the host's kernel will select the value.", +} + +func (SimpleMacvlanConfig) SwaggerDoc() map[string]string { + return map_SimpleMacvlanConfig +} + +var map_StaticIPAMAddresses = map[string]string{ + "": "StaticIPAMAddresses provides IP address and Gateway for static IPAM addresses", + "address": "Address is the IP address in CIDR format", + "gateway": "Gateway is an IP inside the subnet to designate as the gateway", +} + +func (StaticIPAMAddresses) SwaggerDoc() map[string]string { + return map_StaticIPAMAddresses +} + +var map_StaticIPAMConfig = map[string]string{ + "": "StaticIPAMConfig contains configurations for static IPAM (IP Address Management)", + "addresses": "Addresses configures the IP addresses for the interface", + "routes": "Routes configures IP routes for the interface", + "dns": "DNS configures DNS for the interface", +} + +func (StaticIPAMConfig) SwaggerDoc() map[string]string { + return map_StaticIPAMConfig +} + +var map_StaticIPAMDNS = map[string]string{ + "": "StaticIPAMDNS provides DNS related information for static IPAM", + "nameservers": "Nameservers lists the DNS servers to use for IP lookup", + "domain": "Domain configures the local domain used for short hostname lookups", + "search": "Search configures priority-ordered search domains for short hostname lookups", +} + +func (StaticIPAMDNS) SwaggerDoc() map[string]string { + return map_StaticIPAMDNS +} + +var map_StaticIPAMRoutes = map[string]string{ + "": "StaticIPAMRoutes provides Destination/Gateway pairs for static IPAM routes", + "destination": "Destination is the IP route destination", + "gateway": "Gateway is the route's next-hop IP address. If unset, a default gateway is assumed (as determined by the CNI plugin).", +} + +func (StaticIPAMRoutes) SwaggerDoc() map[string]string { + return map_StaticIPAMRoutes +} + +var map_OpenShiftAPIServer = map[string]string{ + "": "OpenShiftAPIServer provides information to configure an operator to manage openshift-apiserver.", + "spec": "spec is the specification of the desired behavior of the OpenShift API Server.", + "status": "status defines the observed status of the OpenShift API Server.", +} + +func (OpenShiftAPIServer) SwaggerDoc() map[string]string { + return map_OpenShiftAPIServer +} + +var map_OpenShiftAPIServerList = map[string]string{ + "": "OpenShiftAPIServerList is a collection of items", + "items": "Items contains the items", +} + +func 
(OpenShiftAPIServerList) SwaggerDoc() map[string]string { + return map_OpenShiftAPIServerList +} + +var map_OpenShiftAPIServerStatus = map[string]string{ + "latestAvailableRevision": "latestAvailableRevision is the latest revision used as suffix of revisioned secrets like encryption-config. A new revision causes a new deployment of pods.", +} + +func (OpenShiftAPIServerStatus) SwaggerDoc() map[string]string { + return map_OpenShiftAPIServerStatus +} + +var map_OpenShiftControllerManager = map[string]string{ + "": "OpenShiftControllerManager provides information to configure an operator to manage openshift-controller-manager.", +} + +func (OpenShiftControllerManager) SwaggerDoc() map[string]string { + return map_OpenShiftControllerManager +} + +var map_OpenShiftControllerManagerList = map[string]string{ + "": "OpenShiftControllerManagerList is a collection of items", + "items": "Items contains the items", +} + +func (OpenShiftControllerManagerList) SwaggerDoc() map[string]string { + return map_OpenShiftControllerManagerList +} + +var map_KubeScheduler = map[string]string{ + "": "KubeScheduler provides information to configure an operator to manage the scheduler.", + "spec": "spec is the specification of the desired behavior of the Kubernetes Scheduler", + "status": "status is the most recently observed status of the Kubernetes Scheduler", +} + +func (KubeScheduler) SwaggerDoc() map[string]string { + return map_KubeScheduler +} + +var map_KubeSchedulerList = map[string]string{ + "": "KubeSchedulerList is a collection of items", + "items": "Items contains the items", +} + +func (KubeSchedulerList) SwaggerDoc() map[string]string { + return map_KubeSchedulerList +} + +var map_ServiceCA = map[string]string{ + "": "ServiceCA provides information to configure an operator to manage the service cert controllers", + "spec": "spec holds user settable values for configuration", + "status": "status holds observed values from the cluster. 
They may not be overridden.", +} + +func (ServiceCA) SwaggerDoc() map[string]string { + return map_ServiceCA +} + +var map_ServiceCAList = map[string]string{ + "": "ServiceCAList is a collection of items", + "items": "Items contains the items", +} + +func (ServiceCAList) SwaggerDoc() map[string]string { + return map_ServiceCAList +} + +var map_ServiceCatalogAPIServer = map[string]string{ + "": "ServiceCatalogAPIServer provides information to configure an operator to manage Service Catalog API Server", +} + +func (ServiceCatalogAPIServer) SwaggerDoc() map[string]string { + return map_ServiceCatalogAPIServer +} + +var map_ServiceCatalogAPIServerList = map[string]string{ + "": "ServiceCatalogAPIServerList is a collection of items", + "items": "Items contains the items", +} + +func (ServiceCatalogAPIServerList) SwaggerDoc() map[string]string { + return map_ServiceCatalogAPIServerList +} + +var map_ServiceCatalogControllerManager = map[string]string{ + "": "ServiceCatalogControllerManager provides information to configure an operator to manage Service Catalog Controller Manager", +} + +func (ServiceCatalogControllerManager) SwaggerDoc() map[string]string { + return map_ServiceCatalogControllerManager +} + +var map_ServiceCatalogControllerManagerList = map[string]string{ + "": "ServiceCatalogControllerManagerList is a collection of items", + "items": "Items contains the items", +} + +func (ServiceCatalogControllerManagerList) SwaggerDoc() map[string]string { + return map_ServiceCatalogControllerManagerList +} + +// AUTO-GENERATED FUNCTIONS END HERE diff --git a/vendor/github.com/openshift/api/operator/v1alpha1/0000_10_config-operator_01_imagecontentsourcepolicy.crd.yaml b/vendor/github.com/openshift/api/operator/v1alpha1/0000_10_config-operator_01_imagecontentsourcepolicy.crd.yaml new file mode 100644 index 0000000000000000000000000000000000000000..b28b0415c9a5a2209c0be06c604688f306b57730 --- /dev/null +++ b/vendor/github.com/openshift/api/operator/v1alpha1/0000_10_config-operator_01_imagecontentsourcepolicy.crd.yaml @@ -0,0 +1,88 @@ +apiVersion: apiextensions.k8s.io/v1beta1 +kind: CustomResourceDefinition +metadata: + name: imagecontentsourcepolicies.operator.openshift.io +spec: + group: operator.openshift.io + scope: Cluster + preserveUnknownFields: false + names: + kind: ImageContentSourcePolicy + singular: imagecontentsourcepolicy + plural: imagecontentsourcepolicies + listKind: ImageContentSourcePolicyList + versions: + - name: v1alpha1 + served: true + storage: true + subresources: + status: {} + "validation": + "openAPIV3Schema": + description: ImageContentSourcePolicy holds cluster-wide information about how + to handle registry mirror rules. When multiple policies are defined, the outcome + of the behavior is defined on each field. + type: object + required: + - spec + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation + of an object. Servers should convert recognized schemas to the latest + internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource this + object represents. Servers may infer this from the endpoint the client + submits requests to. Cannot be updated. In CamelCase. 
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + metadata: + type: object + spec: + description: spec holds user settable values for configuration + type: object + properties: + repositoryDigestMirrors: + description: "repositoryDigestMirrors allows images referenced by image + digests in pods to be pulled from alternative mirrored repository + locations. The image pull specification provided to the pod will be + compared to the source locations described in RepositoryDigestMirrors + and the image may be pulled down from any of the mirrors in the list + instead of the specified repository allowing administrators to choose + a potentially faster mirror. Only image pull specifications that have + an image digest will have this behavior applied to them - tags will + continue to be pulled from the specified repository in the pull spec. + \n Each “source” repository is treated independently; configurations + for different “source” repositories don’t interact. \n When multiple + policies are defined for the same “source” repository, the sets of + defined mirrors will be merged together, preserving the relative order + of the mirrors, if possible. For example, if policy A has mirrors + `a, b, c` and policy B has mirrors `c, d, e`, the mirrors will be + used in the order `a, b, c, d, e`. If the orders of mirror entries + conflict (e.g. `a, b` vs. `b, a`) the configuration is not rejected + but the resulting order is unspecified." + type: array + items: + description: 'RepositoryDigestMirrors holds cluster-wide information + about how to handle mirrors in the registries config. Note: the mirrors + only work when pulling the images that are referenced by their digests.' + type: object + required: + - source + properties: + mirrors: + description: mirrors is one or more repositories that may also + contain the same images. The order of mirrors in this list is + treated as the user's desired priority, while source is by default + considered lower priority than all mirrors. Other cluster configuration, + including (but not limited to) other repositoryDigestMirrors + objects, may impact the exact order mirrors are contacted in, + or some mirrors may be contacted in parallel, so this should + be considered a preference rather than a guarantee of ordering. + type: array + items: + type: string + source: + description: source is the repository that users refer to, e.g. + in image pull specifications. + type: string
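The merge described for repositoryDigestMirrors (policy A's `a, b, c` plus policy B's `c, d, e` yields `a, b, c, d, e`) amounts to an order-preserving, de-duplicating union. A sketch of that behavior in Go; the actual merge is performed by the cluster components that consume these policies, not by this package:

package main

import "fmt"

// mergeMirrors unions mirror lists in order, skipping duplicates, so
// that the relative order within each policy is preserved. Sketch only.
func mergeMirrors(policies ...[]string) []string {
	seen := make(map[string]bool)
	var merged []string
	for _, mirrors := range policies {
		for _, m := range mirrors {
			if !seen[m] {
				seen[m] = true
				merged = append(merged, m)
			}
		}
	}
	return merged
}

func main() {
	policyA := []string{"a", "b", "c"}
	policyB := []string{"c", "d", "e"}
	fmt.Println(mergeMirrors(policyA, policyB)) // [a b c d e]
}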
diff --git a/vendor/github.com/openshift/api/operator/v1alpha1/doc.go b/vendor/github.com/openshift/api/operator/v1alpha1/doc.go new file mode 100644 index 0000000000000000000000000000000000000000..9d18719532c722dc2bf1ed8c2e61014c7c77e1ab --- /dev/null +++ b/vendor/github.com/openshift/api/operator/v1alpha1/doc.go @@ -0,0 +1,6 @@ +// +k8s:deepcopy-gen=package,register +// +k8s:defaulter-gen=TypeMeta +// +k8s:openapi-gen=true + +// +groupName=operator.openshift.io +package v1alpha1 diff --git a/vendor/github.com/openshift/api/operator/v1alpha1/register.go b/vendor/github.com/openshift/api/operator/v1alpha1/register.go new file mode 100644 index 0000000000000000000000000000000000000000..3c731f61871f6d947220f14cd1c6c42eefe3f0f3 --- /dev/null +++ b/vendor/github.com/openshift/api/operator/v1alpha1/register.go @@ -0,0 +1,41 @@ +package v1alpha1 + +import ( + configv1 "github.com/openshift/api/config/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" +) + +var ( + GroupName = "operator.openshift.io" + GroupVersion = schema.GroupVersion{Group: GroupName, Version: "v1alpha1"} + schemeBuilder = runtime.NewSchemeBuilder(addKnownTypes, configv1.Install) + // Install is a function which adds this version to a scheme + Install = schemeBuilder.AddToScheme + + // SchemeGroupVersion generated code relies on this name + // Deprecated + SchemeGroupVersion = GroupVersion + // AddToScheme exists solely to keep the old generators creating valid code + // DEPRECATED + AddToScheme = schemeBuilder.AddToScheme +) + +// Resource generated code relies on this being here, but it logically belongs to the group +// DEPRECATED +func Resource(resource string) schema.GroupResource { + return schema.GroupResource{Group: GroupName, Resource: resource} +} + +func addKnownTypes(scheme *runtime.Scheme) error { + metav1.AddToGroupVersion(scheme, GroupVersion) + + scheme.AddKnownTypes(GroupVersion, + &GenericOperatorConfig{}, + &ImageContentSourcePolicy{}, + &ImageContentSourcePolicyList{}, + ) + + return nil +}
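register.go wires the v1alpha1 kinds into a runtime.Scheme through Install, pulling in the config/v1 dependencies via configv1.Install. A typical consumer does something like this sketch:

package main

import (
	"fmt"

	operatorv1alpha1 "github.com/openshift/api/operator/v1alpha1"
	"k8s.io/apimachinery/pkg/runtime"
)

func main() {
	// Install registers GenericOperatorConfig, ImageContentSourcePolicy,
	// and ImageContentSourcePolicyList under operator.openshift.io/v1alpha1.
	scheme := runtime.NewScheme()
	if err := operatorv1alpha1.Install(scheme); err != nil {
		panic(err)
	}
	gvk := operatorv1alpha1.GroupVersion.WithKind("ImageContentSourcePolicy")
	fmt.Println("registered:", scheme.Recognizes(gvk))
}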
diff --git a/vendor/github.com/openshift/api/operator/v1alpha1/types.go b/vendor/github.com/openshift/api/operator/v1alpha1/types.go new file mode 100644 index 0000000000000000000000000000000000000000..8f2e5be2435f72ecda4e57c45d88520f2bfa64d2 --- /dev/null +++ b/vendor/github.com/openshift/api/operator/v1alpha1/types.go @@ -0,0 +1,180 @@ +package v1alpha1 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + + configv1 "github.com/openshift/api/config/v1" +) + +type ManagementState string + +const ( + // Managed means that the operator is actively managing its resources and trying to keep the component active + Managed ManagementState = "Managed" + // Unmanaged means that the operator is not taking any action related to the component + Unmanaged ManagementState = "Unmanaged" + // Removed means that the operator is actively managing its resources and trying to remove all traces of the component + Removed ManagementState = "Removed" +) + +// OperatorSpec contains common fields that an operator needs. It is intended to be included anonymously +// inside of the Spec struct for your particular operator. +type OperatorSpec struct { + // managementState indicates whether and how the operator should manage the component + ManagementState ManagementState `json:"managementState"` + + // imagePullSpec is the image to use for the component. + ImagePullSpec string `json:"imagePullSpec"` + + // imagePullPolicy specifies the image pull policy. One of Always, Never, IfNotPresent. Defaults to Always if :latest tag is specified, + // or IfNotPresent otherwise. + ImagePullPolicy string `json:"imagePullPolicy"` + + // version is the desired state in major.minor.micro-patch. Usually patch is ignored. + Version string `json:"version"` + + // logging contains glog parameters for the component pods. It's always a command line arg for the moment + Logging LoggingConfig `json:"logging,omitempty"` +} + +// LoggingConfig holds information about configuring logging +type LoggingConfig struct { + // level is passed to glog. + Level int64 `json:"level"` + + // vmodule is passed to glog. + Vmodule string `json:"vmodule"` +} + +type ConditionStatus string + +const ( + ConditionTrue ConditionStatus = "True" + ConditionFalse ConditionStatus = "False" + ConditionUnknown ConditionStatus = "Unknown" + + // these conditions match the conditions for the ClusterOperator type. + OperatorStatusTypeAvailable = "Available" + OperatorStatusTypeProgressing = "Progressing" + OperatorStatusTypeFailing = "Failing" + + OperatorStatusTypeMigrating = "Migrating" + // TODO this is going to be removed + OperatorStatusTypeSyncSuccessful = "SyncSuccessful" +) + +// OperatorCondition is just the standard condition fields. +type OperatorCondition struct { + Type string `json:"type"` + Status ConditionStatus `json:"status"` + LastTransitionTime metav1.Time `json:"lastTransitionTime,omitempty"` + Reason string `json:"reason,omitempty"` + Message string `json:"message,omitempty"` +} + +// VersionAvailability gives information about the synchronization and operational status of a particular version of the component +type VersionAvailability struct { + // version is the level this availability applies to + Version string `json:"version"` + // updatedReplicas indicates how many replicas are at the desired state + UpdatedReplicas int32 `json:"updatedReplicas"` + // readyReplicas indicates how many replicas are ready and at the desired state + ReadyReplicas int32 `json:"readyReplicas"` + // errors indicates what failures are associated with the operator trying to manage this version + Errors []string `json:"errors"` + // generations allows an operator to track what the generation of "important" resources was the last time we updated them + Generations []GenerationHistory `json:"generations"` +} + +// GenerationHistory keeps track of the generation for a given resource so that decisions about forced updates can be made. +type GenerationHistory struct { + // group is the group of the thing you're tracking + Group string `json:"group"` + // resource is the resource type of the thing you're tracking + Resource string `json:"resource"` + // namespace is where the thing you're tracking is + Namespace string `json:"namespace"` + // name is the name of the thing you're tracking + Name string `json:"name"` + // lastGeneration is the last generation of the workload controller involved + LastGeneration int64 `json:"lastGeneration"` +} + +// OperatorStatus contains common fields that an operator needs. It is intended to be included anonymously +// inside of the Status struct for your particular operator. 
+type OperatorStatus struct { + // observedGeneration is the last generation change you've dealt with + ObservedGeneration int64 `json:"observedGeneration,omitempty"` + + // conditions is a list of conditions and their status + Conditions []OperatorCondition `json:"conditions,omitempty"` + + // state indicates what the operator has observed to be its current operational status. + State ManagementState `json:"state,omitempty"` + // taskSummary is a high level summary of what the controller is currently attempting to do. It is high-level, human-readable + // and not guaranteed in any way. (I needed this for debugging and realized it made a great summary). + TaskSummary string `json:"taskSummary,omitempty"` + + // currentVersionAvailability is availability information for the current version. If it is unmanaged or removed, this doesn't exist. + CurrentAvailability *VersionAvailability `json:"currentVersionAvailability,omitempty"` + // targetVersionAvailability is availability information for the target version if we are migrating + TargetAvailability *VersionAvailability `json:"targetVersionAvailability,omitempty"` +}
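Consumers usually scan OperatorStatus.Conditions for a condition of a given type; findCondition below is a hypothetical helper written against the types in this file, not something the vendored package provides:

package main

import (
	"fmt"

	operatorv1alpha1 "github.com/openshift/api/operator/v1alpha1"
)

// findCondition returns a pointer to the condition with the given type,
// or nil if it is absent. Hypothetical helper for illustration.
func findCondition(status *operatorv1alpha1.OperatorStatus, condType string) *operatorv1alpha1.OperatorCondition {
	for i := range status.Conditions {
		if status.Conditions[i].Type == condType {
			return &status.Conditions[i]
		}
	}
	return nil
}

func main() {
	status := operatorv1alpha1.OperatorStatus{
		Conditions: []operatorv1alpha1.OperatorCondition{
			{Type: operatorv1alpha1.OperatorStatusTypeAvailable, Status: operatorv1alpha1.ConditionTrue},
		},
	}
	if c := findCondition(&status, operatorv1alpha1.OperatorStatusTypeAvailable); c != nil {
		fmt.Println(c.Type, "=", c.Status)
	}
}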
+ +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// GenericOperatorConfig provides information to configure an operator +type GenericOperatorConfig struct { + metav1.TypeMeta `json:",inline"` + + // ServingInfo is the HTTP serving information for the controller's endpoints + ServingInfo configv1.HTTPServingInfo `json:"servingInfo,omitempty"` + + // leaderElection provides information to elect a leader. Only override this if you have a specific need + LeaderElection configv1.LeaderElection `json:"leaderElection,omitempty"` + + // authentication allows configuration of authentication for the endpoints + Authentication DelegatedAuthentication `json:"authentication,omitempty"` + // authorization allows configuration of authorization for the endpoints + Authorization DelegatedAuthorization `json:"authorization,omitempty"` +} + +// DelegatedAuthentication allows authentication to be disabled. +type DelegatedAuthentication struct { + // disabled indicates that authentication should be disabled. By default it will use delegated authentication. + Disabled bool `json:"disabled,omitempty"` +} + +// DelegatedAuthorization allows authorization to be disabled. +type DelegatedAuthorization struct { + // disabled indicates that authorization should be disabled. By default it will use delegated authorization. + Disabled bool `json:"disabled,omitempty"` +} + +// StaticPodOperatorStatus is status for controllers that manage static pods. There are different needs because individual +// node status must be tracked. +type StaticPodOperatorStatus struct { + OperatorStatus `json:",inline"` + + // latestAvailableDeploymentGeneration is the deploymentID of the most recent deployment + LatestAvailableDeploymentGeneration int32 `json:"latestAvailableDeploymentGeneration"` + + // nodeStatuses track the deployment values and errors across individual nodes + NodeStatuses []NodeStatus `json:"nodeStatuses"` +} + +// NodeStatus provides information about the current state of a particular node managed by this operator. +type NodeStatus struct { + // nodeName is the name of the node + NodeName string `json:"nodeName"` + + // currentDeploymentGeneration is the generation of the most recently successful deployment + CurrentDeploymentGeneration int32 `json:"currentDeploymentGeneration"` + // targetDeploymentGeneration is the generation of the deployment we're trying to apply + TargetDeploymentGeneration int32 `json:"targetDeploymentGeneration"` + // lastFailedDeploymentGeneration is the generation of the deployment we tried and failed to deploy. + LastFailedDeploymentGeneration int32 `json:"lastFailedDeploymentGeneration"` + + // lastFailedDeploymentGenerationErrors is a list of the errors during the failed deployment referenced in lastFailedDeploymentGeneration + LastFailedDeploymentErrors []string `json:"lastFailedDeploymentErrors"` +} diff --git a/vendor/github.com/openshift/api/operator/v1alpha1/types_image_content_source_policy.go b/vendor/github.com/openshift/api/operator/v1alpha1/types_image_content_source_policy.go new file mode 100644 index 0000000000000000000000000000000000000000..49f8b9522261987b5742bbc93eb236559301c3f8 --- /dev/null +++ b/vendor/github.com/openshift/api/operator/v1alpha1/types_image_content_source_policy.go @@ -0,0 +1,67 @@ +package v1alpha1 + +import metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + +// +genclient +// +genclient:nonNamespaced +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// ImageContentSourcePolicy holds cluster-wide information about how to handle registry mirror rules. +// When multiple policies are defined, the outcome of the behavior is defined on each field. +type ImageContentSourcePolicy struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty"` + + // spec holds user settable values for configuration + // +kubebuilder:validation:Required + // +required + Spec ImageContentSourcePolicySpec `json:"spec"` +}
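Assembled from the types above, a complete ImageContentSourcePolicy looks like the following sketch; digest-based pulls for source may be served from the mirrors, while tag-based pulls keep going to source (the repository names are placeholders):

package main

import (
	"fmt"

	operatorv1alpha1 "github.com/openshift/api/operator/v1alpha1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

func main() {
	icsp := operatorv1alpha1.ImageContentSourcePolicy{
		ObjectMeta: metav1.ObjectMeta{Name: "example"},
		Spec: operatorv1alpha1.ImageContentSourcePolicySpec{
			RepositoryDigestMirrors: []operatorv1alpha1.RepositoryDigestMirrors{
				{
					// Digest pulls for this source may come from the mirror.
					Source:  "registry.example.com/team/app",
					Mirrors: []string{"mirror.example.com/team/app"},
				},
			},
		},
	}
	fmt.Printf("%s mirrors %v\n", icsp.Name, icsp.Spec.RepositoryDigestMirrors[0].Mirrors)
}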
+ +// ImageContentSourcePolicySpec is the specification of the ImageContentSourcePolicy CRD. +type ImageContentSourcePolicySpec struct { + // repositoryDigestMirrors allows images referenced by image digests in pods to be + // pulled from alternative mirrored repository locations. The image pull specification + // provided to the pod will be compared to the source locations described in RepositoryDigestMirrors + // and the image may be pulled down from any of the mirrors in the list instead of the + // specified repository allowing administrators to choose a potentially faster mirror. + // Only image pull specifications that have an image digest will have this behavior applied + // to them - tags will continue to be pulled from the specified repository in the pull spec. + // + // Each “source” repository is treated independently; configurations for different “source” + // repositories don’t interact. + // + // When multiple policies are defined for the same “source” repository, the sets of defined + // mirrors will be merged together, preserving the relative order of the mirrors, if possible. + // For example, if policy A has mirrors `a, b, c` and policy B has mirrors `c, d, e`, the + // mirrors will be used in the order `a, b, c, d, e`. If the orders of mirror entries conflict + // (e.g. `a, b` vs. `b, a`) the configuration is not rejected but the resulting order is unspecified. + // +optional + RepositoryDigestMirrors []RepositoryDigestMirrors `json:"repositoryDigestMirrors"` +} + +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// ImageContentSourcePolicyList lists the items in the ImageContentSourcePolicy CRD. +type ImageContentSourcePolicyList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata"` + + Items []ImageContentSourcePolicy `json:"items"` +} + +// RepositoryDigestMirrors holds cluster-wide information about how to handle mirrors in the registries config. +// Note: the mirrors only work when pulling the images that are referenced by their digests. +type RepositoryDigestMirrors struct { + // source is the repository that users refer to, e.g. in image pull specifications. + // +required + Source string `json:"source"` + // mirrors is one or more repositories that may also contain the same images. + // The order of mirrors in this list is treated as the user's desired priority, while source + // is by default considered lower priority than all mirrors. Other cluster configuration, + // including (but not limited to) other repositoryDigestMirrors objects, + // may impact the exact order mirrors are contacted in, or some mirrors may be contacted + // in parallel, so this should be considered a preference rather than a guarantee of ordering. + // +optional + Mirrors []string `json:"mirrors"` +} diff --git a/vendor/github.com/openshift/api/operator/v1alpha1/zz_generated.deepcopy.go b/vendor/github.com/openshift/api/operator/v1alpha1/zz_generated.deepcopy.go new file mode 100644 index 0000000000000000000000000000000000000000..79f75bd0beaf9e512573b36b764663593c2ffd95 --- /dev/null +++ b/vendor/github.com/openshift/api/operator/v1alpha1/zz_generated.deepcopy.go @@ -0,0 +1,344 @@ +// +build !ignore_autogenerated + +// Code generated by deepcopy-gen. DO NOT EDIT. + +package v1alpha1 + +import ( + runtime "k8s.io/apimachinery/pkg/runtime" +) + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DelegatedAuthentication) DeepCopyInto(out *DelegatedAuthentication) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DelegatedAuthentication. +func (in *DelegatedAuthentication) DeepCopy() *DelegatedAuthentication { + if in == nil { + return nil + } + out := new(DelegatedAuthentication) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DelegatedAuthorization) DeepCopyInto(out *DelegatedAuthorization) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DelegatedAuthorization. +func (in *DelegatedAuthorization) DeepCopy() *DelegatedAuthorization { + if in == nil { + return nil + } + out := new(DelegatedAuthorization) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *GenerationHistory) DeepCopyInto(out *GenerationHistory) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new GenerationHistory. 
+func (in *GenerationHistory) DeepCopy() *GenerationHistory { + if in == nil { + return nil + } + out := new(GenerationHistory) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *GenericOperatorConfig) DeepCopyInto(out *GenericOperatorConfig) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ServingInfo.DeepCopyInto(&out.ServingInfo) + out.LeaderElection = in.LeaderElection + out.Authentication = in.Authentication + out.Authorization = in.Authorization + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new GenericOperatorConfig. +func (in *GenericOperatorConfig) DeepCopy() *GenericOperatorConfig { + if in == nil { + return nil + } + out := new(GenericOperatorConfig) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *GenericOperatorConfig) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ImageContentSourcePolicy) DeepCopyInto(out *ImageContentSourcePolicy) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ImageContentSourcePolicy. +func (in *ImageContentSourcePolicy) DeepCopy() *ImageContentSourcePolicy { + if in == nil { + return nil + } + out := new(ImageContentSourcePolicy) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *ImageContentSourcePolicy) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ImageContentSourcePolicyList) DeepCopyInto(out *ImageContentSourcePolicyList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]ImageContentSourcePolicy, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ImageContentSourcePolicyList. +func (in *ImageContentSourcePolicyList) DeepCopy() *ImageContentSourcePolicyList { + if in == nil { + return nil + } + out := new(ImageContentSourcePolicyList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *ImageContentSourcePolicyList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ImageContentSourcePolicySpec) DeepCopyInto(out *ImageContentSourcePolicySpec) { + *out = *in + if in.RepositoryDigestMirrors != nil { + in, out := &in.RepositoryDigestMirrors, &out.RepositoryDigestMirrors + *out = make([]RepositoryDigestMirrors, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ImageContentSourcePolicySpec. +func (in *ImageContentSourcePolicySpec) DeepCopy() *ImageContentSourcePolicySpec { + if in == nil { + return nil + } + out := new(ImageContentSourcePolicySpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *LoggingConfig) DeepCopyInto(out *LoggingConfig) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LoggingConfig. +func (in *LoggingConfig) DeepCopy() *LoggingConfig { + if in == nil { + return nil + } + out := new(LoggingConfig) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *NodeStatus) DeepCopyInto(out *NodeStatus) { + *out = *in + if in.LastFailedDeploymentErrors != nil { + in, out := &in.LastFailedDeploymentErrors, &out.LastFailedDeploymentErrors + *out = make([]string, len(*in)) + copy(*out, *in) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NodeStatus. +func (in *NodeStatus) DeepCopy() *NodeStatus { + if in == nil { + return nil + } + out := new(NodeStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *OperatorCondition) DeepCopyInto(out *OperatorCondition) { + *out = *in + in.LastTransitionTime.DeepCopyInto(&out.LastTransitionTime) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OperatorCondition. +func (in *OperatorCondition) DeepCopy() *OperatorCondition { + if in == nil { + return nil + } + out := new(OperatorCondition) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *OperatorSpec) DeepCopyInto(out *OperatorSpec) { + *out = *in + out.Logging = in.Logging + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OperatorSpec. +func (in *OperatorSpec) DeepCopy() *OperatorSpec { + if in == nil { + return nil + } + out := new(OperatorSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *OperatorStatus) DeepCopyInto(out *OperatorStatus) { + *out = *in + if in.Conditions != nil { + in, out := &in.Conditions, &out.Conditions + *out = make([]OperatorCondition, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.CurrentAvailability != nil { + in, out := &in.CurrentAvailability, &out.CurrentAvailability + *out = new(VersionAvailability) + (*in).DeepCopyInto(*out) + } + if in.TargetAvailability != nil { + in, out := &in.TargetAvailability, &out.TargetAvailability + *out = new(VersionAvailability) + (*in).DeepCopyInto(*out) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OperatorStatus. +func (in *OperatorStatus) DeepCopy() *OperatorStatus { + if in == nil { + return nil + } + out := new(OperatorStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *RepositoryDigestMirrors) DeepCopyInto(out *RepositoryDigestMirrors) { + *out = *in + if in.Mirrors != nil { + in, out := &in.Mirrors, &out.Mirrors + *out = make([]string, len(*in)) + copy(*out, *in) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RepositoryDigestMirrors. +func (in *RepositoryDigestMirrors) DeepCopy() *RepositoryDigestMirrors { + if in == nil { + return nil + } + out := new(RepositoryDigestMirrors) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *StaticPodOperatorStatus) DeepCopyInto(out *StaticPodOperatorStatus) { + *out = *in + in.OperatorStatus.DeepCopyInto(&out.OperatorStatus) + if in.NodeStatuses != nil { + in, out := &in.NodeStatuses, &out.NodeStatuses + *out = make([]NodeStatus, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new StaticPodOperatorStatus. +func (in *StaticPodOperatorStatus) DeepCopy() *StaticPodOperatorStatus { + if in == nil { + return nil + } + out := new(StaticPodOperatorStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *VersionAvailability) DeepCopyInto(out *VersionAvailability) { + *out = *in + if in.Errors != nil { + in, out := &in.Errors, &out.Errors + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.Generations != nil { + in, out := &in.Generations, &out.Generations + *out = make([]GenerationHistory, len(*in)) + copy(*out, *in) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VersionAvailability. 
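+// Usage sketch (editorial; "cached" is a hypothetical object obtained from an
+// informer cache): DeepCopy severs all aliasing with the receiver, which is
+// what makes it safe to mutate the result:
+//
+//	status := cached.DeepCopy()   // fully detached *OperatorStatus
+//	status.TaskSummary = "resync" // does not touch the cached object
+//
+// Note the two strategies above: []string fields such as Errors are duplicated
+// with copy() because their elements hold no references, while Conditions is
+// copied element-by-element via DeepCopyInto so nested values like
+// LastTransitionTime are duplicated as well.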
+func (in *VersionAvailability) DeepCopy() *VersionAvailability { + if in == nil { + return nil + } + out := new(VersionAvailability) + in.DeepCopyInto(out) + return out +} diff --git a/vendor/github.com/openshift/api/operator/v1alpha1/zz_generated.swagger_doc_generated.go b/vendor/github.com/openshift/api/operator/v1alpha1/zz_generated.swagger_doc_generated.go new file mode 100644 index 0000000000000000000000000000000000000000..5a32df8389fab8b22714e23a5b438048c23bd73d --- /dev/null +++ b/vendor/github.com/openshift/api/operator/v1alpha1/zz_generated.swagger_doc_generated.go @@ -0,0 +1,174 @@ +package v1alpha1 + +// This file contains a collection of methods that can be used from go-restful to +// generate Swagger API documentation for its models. Please read this PR for more +// information on the implementation: https://github.com/emicklei/go-restful/pull/215 +// +// TODOs are ignored from the parser (e.g. TODO(andronat):... || TODO:...) if and only if +// they are on one line! For multiple line or blocks that you want to ignore use ---. +// Any context after a --- is ignored. +// +// Those methods can be generated by using hack/update-swagger-docs.sh + +// AUTO-GENERATED FUNCTIONS START HERE +var map_DelegatedAuthentication = map[string]string{ + "": "DelegatedAuthentication allows authentication to be disabled.", + "disabled": "disabled indicates that authentication should be disabled. By default it will use delegated authentication.", +} + +func (DelegatedAuthentication) SwaggerDoc() map[string]string { + return map_DelegatedAuthentication +} + +var map_DelegatedAuthorization = map[string]string{ + "": "DelegatedAuthorization allows authorization to be disabled.", + "disabled": "disabled indicates that authorization should be disabled. By default it will use delegated authorization.", +} + +func (DelegatedAuthorization) SwaggerDoc() map[string]string { + return map_DelegatedAuthorization +} + +var map_GenerationHistory = map[string]string{ + "": "GenerationHistory keeps track of the generation for a given resource so that decisions about forced updates can be made.", + "group": "group is the group of the thing you're tracking", + "resource": "resource is the resource type of the thing you're tracking", + "namespace": "namespace is where the thing you're tracking is", + "name": "name is the name of the thing you're tracking", + "lastGeneration": "lastGeneration is the last generation of the workload controller involved", +} + +func (GenerationHistory) SwaggerDoc() map[string]string { + return map_GenerationHistory +} + +var map_GenericOperatorConfig = map[string]string{ + "": "GenericOperatorConfig provides information to configure an operator", + "servingInfo": "ServingInfo is the HTTP serving information for the controller's endpoints", + "leaderElection": "leaderElection provides information to elect a leader.
Only override this if you have a specific need", + "authentication": "authentication allows configuration of authentication for the endpoints", + "authorization": "authorization allows configuration of authorization for the endpoints", +} + +func (GenericOperatorConfig) SwaggerDoc() map[string]string { + return map_GenericOperatorConfig +} + +var map_LoggingConfig = map[string]string{ + "": "LoggingConfig holds information about configuring logging", + "level": "level is passed to glog.", + "vmodule": "vmodule is passed to glog.", +} + +func (LoggingConfig) SwaggerDoc() map[string]string { + return map_LoggingConfig +} + +var map_NodeStatus = map[string]string{ + "": "NodeStatus provides information about the current state of a particular node managed by this operator.", + "nodeName": "nodeName is the name of the node", + "currentDeploymentGeneration": "currentDeploymentGeneration is the generation of the most recently successful deployment", + "targetDeploymentGeneration": "targetDeploymentGeneration is the generation of the deployment we're trying to apply", + "lastFailedDeploymentGeneration": "lastFailedDeploymentGeneration is the generation of the deployment we tried and failed to deploy.", + "lastFailedDeploymentErrors": "lastFailedDeploymentErrors is a list of the errors during the failed deployment referenced in lastFailedDeploymentGeneration", +} + +func (NodeStatus) SwaggerDoc() map[string]string { + return map_NodeStatus +} + +var map_OperatorCondition = map[string]string{ + "": "OperatorCondition is just the standard condition fields.", +} + +func (OperatorCondition) SwaggerDoc() map[string]string { + return map_OperatorCondition +} + +var map_OperatorSpec = map[string]string{ + "": "OperatorSpec contains the common fields that an operator needs. It is intended to be anonymously included inside of the Spec struct for your particular operator.", + "managementState": "managementState indicates whether and how the operator should manage the component", + "imagePullSpec": "imagePullSpec is the image to use for the component.", + "imagePullPolicy": "imagePullPolicy specifies the image pull policy. One of Always, Never, IfNotPresent. Defaults to Always if :latest tag is specified, or IfNotPresent otherwise.", + "version": "version is the desired state in major.minor.micro-patch. Usually patch is ignored.", + "logging": "logging contains glog parameters for the component pods. It's always a command line arg for the moment", +} + +func (OperatorSpec) SwaggerDoc() map[string]string { + return map_OperatorSpec +} + +var map_OperatorStatus = map[string]string{ + "": "OperatorStatus contains the common fields that an operator needs. It is intended to be anonymously included inside of the Status struct for your particular operator.", + "observedGeneration": "observedGeneration is the last generation change you've dealt with", + "conditions": "conditions is a list of conditions and their status", + "state": "state indicates what the operator has observed to be its current operational status.", + "taskSummary": "taskSummary is a high level summary of what the controller is currently attempting to do. It is high-level, human-readable and not guaranteed in any way. (I needed this for debugging and realized it made a great summary).", + "currentVersionAvailability": "currentVersionAvailability is availability information for the current version.
If it is unmanaged or removed, this doesn't exist.", + "targetVersionAvailability": "targetVersionAvailability is availability information for the target version if we are migrating", +} + +func (OperatorStatus) SwaggerDoc() map[string]string { + return map_OperatorStatus +} + +var map_StaticPodOperatorStatus = map[string]string{ + "": "StaticPodOperatorStatus is status for controllers that manage static pods. There are different needs because individual node status must be tracked.", + "latestAvailableDeploymentGeneration": "latestAvailableDeploymentGeneration is the deploymentID of the most recent deployment", + "nodeStatuses": "nodeStatuses track the deployment values and errors across individual nodes", +} + +func (StaticPodOperatorStatus) SwaggerDoc() map[string]string { + return map_StaticPodOperatorStatus +} + +var map_VersionAvailability = map[string]string{ + "": "VersionAvailability gives information about the synchronization and operational status of a particular version of the component", + "version": "version is the level this availability applies to", + "updatedReplicas": "updatedReplicas indicates how many replicas are at the desired state", + "readyReplicas": "readyReplicas indicates how many replicas are ready and at the desired state", + "errors": "errors indicates what failures are associated with the operator trying to manage this version", + "generations": "generations allows an operator to track what the generation of \"important\" resources was the last time we updated them", +} + +func (VersionAvailability) SwaggerDoc() map[string]string { + return map_VersionAvailability +} + +var map_ImageContentSourcePolicy = map[string]string{ + "": "ImageContentSourcePolicy holds cluster-wide information about how to handle registry mirror rules. When multiple policies are defined, the outcome of the behavior is defined on each field.", + "spec": "spec holds user settable values for configuration", +} + +func (ImageContentSourcePolicy) SwaggerDoc() map[string]string { + return map_ImageContentSourcePolicy +} + +var map_ImageContentSourcePolicyList = map[string]string{ + "": "ImageContentSourcePolicyList lists the items in the ImageContentSourcePolicy CRD.", +} + +func (ImageContentSourcePolicyList) SwaggerDoc() map[string]string { + return map_ImageContentSourcePolicyList +} + +var map_ImageContentSourcePolicySpec = map[string]string{ + "": "ImageContentSourcePolicySpec is the specification of the ImageContentSourcePolicy CRD.", + "repositoryDigestMirrors": "repositoryDigestMirrors allows images referenced by image digests in pods to be pulled from alternative mirrored repository locations. The image pull specification provided to the pod will be compared to the source locations described in RepositoryDigestMirrors and the image may be pulled down from any of the mirrors in the list instead of the specified repository, allowing administrators to choose a potentially faster mirror. Only image pull specifications that have an image digest will have this behavior applied to them - tags will continue to be pulled from the specified repository in the pull spec.\n\nEach “source” repository is treated independently; configurations for different “source” repositories don't interact.\n\nWhen multiple policies are defined for the same “source” repository, the sets of defined mirrors will be merged together, preserving the relative order of the mirrors, if possible.
For example, if policy A has mirrors `a, b, c` and policy B has mirrors `c, d, e`, the mirrors will be used in the order `a, b, c, d, e`. If the orders of mirror entries conflict (e.g. `a, b` vs. `b, a`) the configuration is not rejected but the resulting order is unspecified.", +} + +func (ImageContentSourcePolicySpec) SwaggerDoc() map[string]string { + return map_ImageContentSourcePolicySpec +} + +var map_RepositoryDigestMirrors = map[string]string{ + "": "RepositoryDigestMirrors holds cluster-wide information about how to handle mirrors in the registries config. Note: the mirrors only work when pulling the images that are referenced by their digests.", + "source": "source is the repository that users refer to, e.g. in image pull specifications.", + "mirrors": "mirrors is one or more repositories that may also contain the same images. The order of mirrors in this list is treated as the user's desired priority, while source is by default considered lower priority than all mirrors. Other cluster configuration, including (but not limited to) other repositoryDigestMirrors objects, may impact the exact order mirrors are contacted in, or some mirrors may be contacted in parallel, so this should be considered a preference rather than a guarantee of ordering.", +} + +func (RepositoryDigestMirrors) SwaggerDoc() map[string]string { + return map_RepositoryDigestMirrors +} + +// AUTO-GENERATED FUNCTIONS END HERE diff --git a/vendor/github.com/openshift/api/osin/install.go b/vendor/github.com/openshift/api/osin/install.go new file mode 100644 index 0000000000000000000000000000000000000000..3f773985b4b878e1d687e5d22064153fd63fc4dd --- /dev/null +++ b/vendor/github.com/openshift/api/osin/install.go @@ -0,0 +1,26 @@ +package osin + +import ( + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" + + osinv1 "github.com/openshift/api/osin/v1" +) + +const ( + GroupName = "osin.config.openshift.io" +) + +var ( + schemeBuilder = runtime.NewSchemeBuilder(osinv1.Install) + // Install is a function which adds every version of this group to a scheme + Install = schemeBuilder.AddToScheme +) + +func Resource(resource string) schema.GroupResource { + return schema.GroupResource{Group: GroupName, Resource: resource} +} + +func Kind(kind string) schema.GroupKind { + return schema.GroupKind{Group: GroupName, Kind: kind} +} diff --git a/vendor/github.com/openshift/api/osin/v1/doc.go b/vendor/github.com/openshift/api/osin/v1/doc.go new file mode 100644 index 0000000000000000000000000000000000000000..b74dfc48ad0efcf46aaad06d6dc8b5551f863897 --- /dev/null +++ b/vendor/github.com/openshift/api/osin/v1/doc.go @@ -0,0 +1,7 @@ +// +k8s:deepcopy-gen=package,register +// +k8s:defaulter-gen=TypeMeta +// +k8s:openapi-gen=true + +// +groupName=osin.config.openshift.io +// Package v1 is the v1 version of the API.
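+// A minimal sketch (assuming the caller builds its own scheme and imports
+// utilruntime and serializer from k8s.io/apimachinery) of wiring up the
+// Install helper from install.go above:
+//
+//	scheme := runtime.NewScheme()
+//	utilruntime.Must(osin.Install(scheme))
+//	codecs := serializer.NewCodecFactory(scheme)
+//	_ = codecs // decoders built from this factory now recognize osin types
+//
+// Install registers every version of the osin.config.openshift.io group, so
+// OsinServerConfig and the identity-provider types below can be decoded.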
+package v1 diff --git a/vendor/github.com/openshift/api/osin/v1/register.go b/vendor/github.com/openshift/api/osin/v1/register.go new file mode 100644 index 0000000000000000000000000000000000000000..4d54a5df40474db234bde2ae9ca5d329325f1fb8 --- /dev/null +++ b/vendor/github.com/openshift/api/osin/v1/register.go @@ -0,0 +1,50 @@ +package v1 + +import ( + configv1 "github.com/openshift/api/config/v1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" +) + +var ( + GroupName = "osin.config.openshift.io" + GroupVersion = schema.GroupVersion{Group: GroupName, Version: "v1"} + schemeBuilder = runtime.NewSchemeBuilder(addKnownTypes, configv1.Install) + // Install is a function which adds this version to a scheme + Install = schemeBuilder.AddToScheme + + // SchemeGroupVersion generated code relies on this name + // Deprecated + SchemeGroupVersion = GroupVersion + // AddToScheme exists solely to keep the old generators creating valid code + // DEPRECATED + AddToScheme = schemeBuilder.AddToScheme +) + +// Resource generated code relies on this being here, but it logically belongs to the group +// DEPRECATED +func Resource(resource string) schema.GroupResource { + return schema.GroupResource{Group: GroupName, Resource: resource} +} + +// Adds the list of known types to api.Scheme. +func addKnownTypes(scheme *runtime.Scheme) error { + scheme.AddKnownTypes(GroupVersion, + &OsinServerConfig{}, + + &BasicAuthPasswordIdentityProvider{}, + &AllowAllPasswordIdentityProvider{}, + &DenyAllPasswordIdentityProvider{}, + &HTPasswdPasswordIdentityProvider{}, + &LDAPPasswordIdentityProvider{}, + &KeystonePasswordIdentityProvider{}, + &RequestHeaderIdentityProvider{}, + &GitHubIdentityProvider{}, + &GitLabIdentityProvider{}, + &GoogleIdentityProvider{}, + &OpenIDIdentityProvider{}, + + &SessionSecrets{}, + ) + return nil +} diff --git a/vendor/github.com/openshift/api/osin/v1/types.go b/vendor/github.com/openshift/api/osin/v1/types.go new file mode 100644 index 0000000000000000000000000000000000000000..f2c3b32bf74ede3cdb6c1825f32f1896f3900ac4 --- /dev/null +++ b/vendor/github.com/openshift/api/osin/v1/types.go @@ -0,0 +1,431 @@ +package v1 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + + configv1 "github.com/openshift/api/config/v1" +) + +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +type OsinServerConfig struct { + metav1.TypeMeta `json:",inline"` + + // provides the standard apiserver configuration + configv1.GenericAPIServerConfig `json:",inline"` + + // oauthConfig holds the necessary configuration options for OAuth authentication + OAuthConfig OAuthConfig `json:"oauthConfig"` +} + +// OAuthConfig holds the necessary configuration options for OAuth authentication +type OAuthConfig struct { + // masterCA is the CA for verifying the TLS connection back to the MasterURL. + // This field is deprecated and will be removed in a future release. + // See loginURL for details. + // Deprecated + MasterCA *string `json:"masterCA"` + + // masterURL is used for making server-to-server calls to exchange authorization codes for access tokens + // This field is deprecated and will be removed in a future release. + // See loginURL for details. + // Deprecated + MasterURL string `json:"masterURL"` + + // masterPublicURL is used for building valid client redirect URLs for internal and external access + // This field is deprecated and will be removed in a future release. + // See loginURL for details. 
+ // Deprecated + MasterPublicURL string `json:"masterPublicURL"` + + // loginURL, along with masterCA, masterURL and masterPublicURL have distinct + // meanings depending on how the OAuth server is run. The two states are: + // 1. embedded in the kube api server (all 3.x releases) + // 2. as a standalone external process (all 4.x releases) + // in the embedded configuration, loginURL is equivalent to masterPublicURL + // and the other fields have functionality that matches their docs. + // in the standalone configuration, the fields are used as: + // loginURL is the URL required to login to the cluster: + // oc login --server=<loginURL> + // masterPublicURL is the issuer URL + // it is accessible from inside (service network) and outside (ingress) of the cluster + // masterURL is the loopback variation of the token_endpoint URL with no path component + // it is only accessible from inside (service network) of the cluster + // masterCA is used to perform TLS verification for connections made to masterURL + // For further details, see the IETF Draft: + // https://tools.ietf.org/html/draft-ietf-oauth-discovery-04#section-2 + LoginURL string `json:"loginURL"` + + // assetPublicURL is used for building valid client redirect URLs for external access + AssetPublicURL string `json:"assetPublicURL"` + + // alwaysShowProviderSelection will force the provider selection page to render even when there is only a single provider. + AlwaysShowProviderSelection bool `json:"alwaysShowProviderSelection"` + + // identityProviders is an ordered list of ways for a user to identify themselves + IdentityProviders []IdentityProvider `json:"identityProviders"` + + // grantConfig describes how to handle grants + GrantConfig GrantConfig `json:"grantConfig"` + + // sessionConfig holds information about configuring sessions. + SessionConfig *SessionConfig `json:"sessionConfig"` + + // tokenConfig contains options for authorization and access tokens + TokenConfig TokenConfig `json:"tokenConfig"` + + // templates allow you to customize pages like the login page. + Templates *OAuthTemplates `json:"templates"` +} + +// OAuthTemplates allow for customization of pages like the login page +type OAuthTemplates struct { + // login is a path to a file containing a go template used to render the login page. + // If unspecified, the default login page is used. + Login string `json:"login"` + + // providerSelection is a path to a file containing a go template used to render the provider selection page. + // If unspecified, the default provider selection page is used. + ProviderSelection string `json:"providerSelection"` + + // error is a path to a file containing a go template used to render error pages during the authentication or grant flow. + // If unspecified, the default error page is used.
+ Error string `json:"error"` +} + +// IdentityProvider provides identities for users authenticating using credentials +type IdentityProvider struct { + // name is used to qualify the identities returned by this provider + Name string `json:"name"` + // challenge indicates whether to issue WWW-Authenticate challenges for this provider + UseAsChallenger bool `json:"challenge"` + // login indicates whether to use this identity provider for unauthenticated browsers to login against + UseAsLogin bool `json:"login"` + // mappingMethod determines how identities from this provider are mapped to users + MappingMethod string `json:"mappingMethod"` + // provider contains the information about how to set up a specific identity provider + // +kubebuilder:pruning:PreserveUnknownFields + Provider runtime.RawExtension `json:"provider"` +} + +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// BasicAuthPasswordIdentityProvider provides identities for users authenticating using HTTP basic auth credentials +type BasicAuthPasswordIdentityProvider struct { + metav1.TypeMeta `json:",inline"` + + // RemoteConnectionInfo contains information about how to connect to the external basic auth server + configv1.RemoteConnectionInfo `json:",inline"` +} + +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// AllowAllPasswordIdentityProvider provides identities for users authenticating using non-empty passwords +type AllowAllPasswordIdentityProvider struct { + metav1.TypeMeta `json:",inline"` +} + +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// DenyAllPasswordIdentityProvider provides no identities for users +type DenyAllPasswordIdentityProvider struct { + metav1.TypeMeta `json:",inline"` +} + +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// HTPasswdPasswordIdentityProvider provides identities for users authenticating using htpasswd credentials +type HTPasswdPasswordIdentityProvider struct { + metav1.TypeMeta `json:",inline"` + + // file is a reference to your htpasswd file + File string `json:"file"` +} + +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// LDAPPasswordIdentityProvider provides identities for users authenticating using LDAP credentials +type LDAPPasswordIdentityProvider struct { + metav1.TypeMeta `json:",inline"` + // url is an RFC 2255 URL which specifies the LDAP search parameters to use. The syntax of the URL is + // ldap://host:port/basedn?attribute?scope?filter + URL string `json:"url"` + // bindDN is an optional DN to bind with during the search phase. + BindDN string `json:"bindDN"` + // bindPassword is an optional password to bind with during the search phase. + BindPassword configv1.StringSource `json:"bindPassword"` + + // insecure, if true, indicates the connection should not use TLS. 
+ // Cannot be set to true with a URL scheme of "ldaps://" + // If false, "ldaps://" URLs connect using TLS, and "ldap://" URLs are upgraded to a TLS connection using StartTLS as specified in https://tools.ietf.org/html/rfc2830 + Insecure bool `json:"insecure"` + // ca is the optional trusted certificate authority bundle to use when making requests to the server + // If empty, the default system roots are used + CA string `json:"ca"` + // attributes maps LDAP attributes to identities + Attributes LDAPAttributeMapping `json:"attributes"` +} + +// LDAPAttributeMapping maps LDAP attributes to OpenShift identity fields +type LDAPAttributeMapping struct { + // id is the list of attributes whose values should be used as the user ID. Required. + // LDAP standard identity attribute is "dn" + ID []string `json:"id"` + // preferredUsername is the list of attributes whose values should be used as the preferred username. + // LDAP standard login attribute is "uid" + PreferredUsername []string `json:"preferredUsername"` + // name is the list of attributes whose values should be used as the display name. Optional. + // If unspecified, no display name is set for the identity + // LDAP standard display name attribute is "cn" + Name []string `json:"name"` + // email is the list of attributes whose values should be used as the email address. Optional. + // If unspecified, no email is set for the identity + Email []string `json:"email"` +} + +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// KeystonePasswordIdentityProvider provides identities for users authenticating using keystone password credentials +type KeystonePasswordIdentityProvider struct { + metav1.TypeMeta `json:",inline"` + // RemoteConnectionInfo contains information about how to connect to the keystone server + configv1.RemoteConnectionInfo `json:",inline"` + // domainName is required for keystone v3 + DomainName string `json:"domainName"` + // useKeystoneIdentity flag indicates that the user should be authenticated by keystone ID, not by username + UseKeystoneIdentity bool `json:"useKeystoneIdentity"` +} + +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// RequestHeaderIdentityProvider provides identities for users authenticating using request header credentials +type RequestHeaderIdentityProvider struct { + metav1.TypeMeta `json:",inline"` + + // loginURL is a URL to redirect unauthenticated /authorize requests to + // Unauthenticated requests from OAuth clients which expect interactive logins will be redirected here + // ${url} is replaced with the current URL, escaped to be safe in a query parameter + // https://www.example.com/sso-login?then=${url} + // ${query} is replaced with the current query string + // https://www.example.com/auth-proxy/oauth/authorize?${query} + LoginURL string `json:"loginURL"` + + // challengeURL is a URL to redirect unauthenticated /authorize requests to + // Unauthenticated requests from OAuth clients which expect WWW-Authenticate challenges will be redirected here + // ${url} is replaced with the current URL, escaped to be safe in a query parameter + // https://www.example.com/sso-login?then=${url} + // ${query} is replaced with the current query string + // https://www.example.com/auth-proxy/oauth/authorize?${query} + ChallengeURL string `json:"challengeURL"` + + // clientCA is a file with the trusted signer certs.
If empty, no request verification is done, and any direct request to the OAuth server can impersonate any identity from this provider, merely by setting a request header. + ClientCA string `json:"clientCA"` + // clientCommonNames is an optional list of common names to require a match from. If empty, any client certificate validated against the clientCA bundle is considered authoritative. + ClientCommonNames []string `json:"clientCommonNames"` + + // headers is the set of headers to check for identity information + Headers []string `json:"headers"` + // preferredUsernameHeaders is the set of headers to check for the preferred username + PreferredUsernameHeaders []string `json:"preferredUsernameHeaders"` + // nameHeaders is the set of headers to check for the display name + NameHeaders []string `json:"nameHeaders"` + // emailHeaders is the set of headers to check for the email address + EmailHeaders []string `json:"emailHeaders"` +} + +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// GitHubIdentityProvider provides identities for users authenticating using GitHub credentials +type GitHubIdentityProvider struct { + metav1.TypeMeta `json:",inline"` + + // clientID is the oauth client ID + ClientID string `json:"clientID"` + // clientSecret is the oauth client secret + ClientSecret configv1.StringSource `json:"clientSecret"` + // organizations optionally restricts which organizations are allowed to log in + Organizations []string `json:"organizations"` + // teams optionally restricts which teams are allowed to log in. Format is <org>/<team>. + Teams []string `json:"teams"` + // hostname is the optional domain (e.g. "mycompany.com") for use with a hosted instance of GitHub Enterprise. + // It must match the GitHub Enterprise settings value that is configured at /setup/settings#hostname. + Hostname string `json:"hostname"` + // ca is the optional trusted certificate authority bundle to use when making requests to the server. + // If empty, the default system roots are used. This can only be configured when hostname is set to a non-empty value. 
+ CA string `json:"ca"` +} + +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// GitLabIdentityProvider provides identities for users authenticating using GitLab credentials +type GitLabIdentityProvider struct { + metav1.TypeMeta `json:",inline"` + + // ca is the optional trusted certificate authority bundle to use when making requests to the server + // If empty, the default system roots are used + CA string `json:"ca"` + // url is the oauth server base URL + URL string `json:"url"` + // clientID is the oauth client ID + ClientID string `json:"clientID"` + // clientSecret is the oauth client secret + ClientSecret configv1.StringSource `json:"clientSecret"` + // legacy determines if OAuth2 or OIDC should be used + // If true, OAuth2 is used + // If false, OIDC is used + // If nil and the URL's host is gitlab.com, OIDC is used + // Otherwise, OAuth2 is used + // In a future release, nil will default to using OIDC + // Eventually this flag will be removed and only OIDC will be used + Legacy *bool `json:"legacy,omitempty"` +} + +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// GoogleIdentityProvider provides identities for users authenticating using Google credentials +type GoogleIdentityProvider struct { + metav1.TypeMeta `json:",inline"` + + // clientID is the oauth client ID + ClientID string `json:"clientID"` + // clientSecret is the oauth client secret + ClientSecret configv1.StringSource `json:"clientSecret"` + + // hostedDomain is the optional Google App domain (e.g. "mycompany.com") to restrict logins to + HostedDomain string `json:"hostedDomain"` +} + +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// OpenIDIdentityProvider provides identities for users authenticating using OpenID credentials +type OpenIDIdentityProvider struct { + metav1.TypeMeta `json:",inline"` + + // ca is the optional trusted certificate authority bundle to use when making requests to the server + // If empty, the default system roots are used + CA string `json:"ca"` + + // clientID is the oauth client ID + ClientID string `json:"clientID"` + // clientSecret is the oauth client secret + ClientSecret configv1.StringSource `json:"clientSecret"` + + // extraScopes are any scopes to request in addition to the standard "openid" scope. + ExtraScopes []string `json:"extraScopes"` + + // extraAuthorizeParameters are any custom parameters to add to the authorize request. + ExtraAuthorizeParameters map[string]string `json:"extraAuthorizeParameters"` + + // urls to use to authenticate + URLs OpenIDURLs `json:"urls"` + + // claims mappings + Claims OpenIDClaims `json:"claims"` +} + +// OpenIDURLs are URLs to use when authenticating with an OpenID identity provider +type OpenIDURLs struct { + // authorize is the oauth authorization URL + Authorize string `json:"authorize"` + // token is the oauth token granting URL + Token string `json:"token"` + // userInfo is the optional userinfo URL. + // If present, a granted access_token is used to request claims + // If empty, a granted id_token is parsed for claims + UserInfo string `json:"userInfo"` +} + +// OpenIDClaims contains a list of OpenID claims to use when authenticating with an OpenID identity provider +type OpenIDClaims struct { + // id is the list of claims whose values should be used as the user ID. Required. + // OpenID standard identity claim is "sub" + ID []string `json:"id"` + // preferredUsername is the list of claims whose values should be used as the preferred username. 
+ // If unspecified, the preferred username is determined from the value of the id claim + PreferredUsername []string `json:"preferredUsername"` + // name is the list of claims whose values should be used as the display name. Optional. + // If unspecified, no display name is set for the identity + Name []string `json:"name"` + // email is the list of claims whose values should be used as the email address. Optional. + // If unspecified, no email is set for the identity + Email []string `json:"email"` +} + +// GrantConfig holds the necessary configuration options for grant handlers +type GrantConfig struct { + // method determines the default strategy to use when an OAuth client requests a grant. + // This method will be used only if the specific OAuth client doesn't provide a strategy + // of its own. Valid grant handling methods are: + // - auto: always approves grant requests, useful for trusted clients + // - prompt: prompts the end user for approval of grant requests, useful for third-party clients + // - deny: always denies grant requests, useful for black-listed clients + Method GrantHandlerType `json:"method"` + + // serviceAccountMethod is used for determining client authorization for service account OAuth clients. + // It must be either: deny, prompt + ServiceAccountMethod GrantHandlerType `json:"serviceAccountMethod"` +} + +type GrantHandlerType string + +const ( + // auto auto-approves client authorization grant requests + GrantHandlerAuto GrantHandlerType = "auto" + // prompt prompts the user to approve new client authorization grant requests + GrantHandlerPrompt GrantHandlerType = "prompt" + // deny auto-denies client authorization grant requests + GrantHandlerDeny GrantHandlerType = "deny" +) + +// SessionConfig specifies options for cookie-based sessions. Used by AuthRequestHandlerSession +type SessionConfig struct { + // sessionSecretsFile is a reference to a file containing a serialized SessionSecrets object + // If no file is specified, a random signing and encryption key are generated at each server start + SessionSecretsFile string `json:"sessionSecretsFile"` + // sessionMaxAgeSeconds specifies how long created sessions last. Used by AuthRequestHandlerSession + SessionMaxAgeSeconds int32 `json:"sessionMaxAgeSeconds"` + // sessionName is the cookie name used to store the session + SessionName string `json:"sessionName"` +} + +// TokenConfig holds the necessary configuration options for authorization and access tokens +type TokenConfig struct { + // authorizeTokenMaxAgeSeconds defines the maximum age of authorize tokens + AuthorizeTokenMaxAgeSeconds int32 `json:"authorizeTokenMaxAgeSeconds"` + // accessTokenMaxAgeSeconds defines the maximum age of access tokens + AccessTokenMaxAgeSeconds int32 `json:"accessTokenMaxAgeSeconds"` + // accessTokenInactivityTimeoutSeconds defines the default token + // inactivity timeout for tokens granted by any client. + // Setting it to nil means the feature is completely disabled (default) + // The default setting can be overridden on a per-OAuthClient basis. + // The value represents the maximum amount of time that can occur between + // consecutive uses of the token. Tokens become invalid if they are not + // used within this temporal window. The user will need to acquire a new + // token to regain access once a token times out.
+ // Valid values are: + // - 0: Tokens never time out + // - X: Tokens time out if there is no activity for X seconds + // The current minimum allowed value for X is 300 (5 minutes) + AccessTokenInactivityTimeoutSeconds *int32 `json:"accessTokenInactivityTimeoutSeconds,omitempty"` +} + +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// SessionSecrets lists the secrets to use to sign/encrypt and authenticate/decrypt created sessions. +type SessionSecrets struct { + metav1.TypeMeta `json:",inline"` + + // Secrets is a list of secrets + // New sessions are signed and encrypted using the first secret. + // Existing sessions are decrypted/authenticated by each secret until one succeeds. This allows rotating secrets. + Secrets []SessionSecret `json:"secrets"` +} + +// SessionSecret is a secret used to authenticate/decrypt cookie-based sessions +type SessionSecret struct { + // Authentication is used to authenticate sessions using HMAC. Recommended to use a secret with 32 or 64 bytes. + Authentication string `json:"authentication"` + // Encryption is used to encrypt sessions. Must be 16, 24, or 32 characters long, to select AES-128, AES-192, or AES-256. + Encryption string `json:"encryption"` +} diff --git a/vendor/github.com/openshift/api/osin/v1/zz_generated.deepcopy.go b/vendor/github.com/openshift/api/osin/v1/zz_generated.deepcopy.go new file mode 100644 index 0000000000000000000000000000000000000000..8636ca9500652f63e70c91885788bc6f01d96161 --- /dev/null +++ b/vendor/github.com/openshift/api/osin/v1/zz_generated.deepcopy.go @@ -0,0 +1,633 @@ +// +build !ignore_autogenerated + +// Code generated by deepcopy-gen. DO NOT EDIT. + +package v1 + +import ( + runtime "k8s.io/apimachinery/pkg/runtime" +) + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *AllowAllPasswordIdentityProvider) DeepCopyInto(out *AllowAllPasswordIdentityProvider) { + *out = *in + out.TypeMeta = in.TypeMeta + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AllowAllPasswordIdentityProvider. +func (in *AllowAllPasswordIdentityProvider) DeepCopy() *AllowAllPasswordIdentityProvider { + if in == nil { + return nil + } + out := new(AllowAllPasswordIdentityProvider) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *AllowAllPasswordIdentityProvider) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *BasicAuthPasswordIdentityProvider) DeepCopyInto(out *BasicAuthPasswordIdentityProvider) { + *out = *in + out.TypeMeta = in.TypeMeta + out.RemoteConnectionInfo = in.RemoteConnectionInfo + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BasicAuthPasswordIdentityProvider. +func (in *BasicAuthPasswordIdentityProvider) DeepCopy() *BasicAuthPasswordIdentityProvider { + if in == nil { + return nil + } + out := new(BasicAuthPasswordIdentityProvider) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
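+// Editorial sketch with hypothetical values: the token lifetimes above are
+// plain seconds, so enabling a ten-minute inactivity window could look like:
+//
+//	timeout := int32(600) // at or above the documented 300-second minimum
+//	tc := TokenConfig{
+//		AuthorizeTokenMaxAgeSeconds:         300,
+//		AccessTokenMaxAgeSeconds:            86400,
+//		AccessTokenInactivityTimeoutSeconds: &timeout,
+//	}
+//
+// Leaving AccessTokenInactivityTimeoutSeconds nil keeps inactivity timeouts
+// disabled, matching the field's documented default.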
+func (in *BasicAuthPasswordIdentityProvider) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DenyAllPasswordIdentityProvider) DeepCopyInto(out *DenyAllPasswordIdentityProvider) { + *out = *in + out.TypeMeta = in.TypeMeta + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DenyAllPasswordIdentityProvider. +func (in *DenyAllPasswordIdentityProvider) DeepCopy() *DenyAllPasswordIdentityProvider { + if in == nil { + return nil + } + out := new(DenyAllPasswordIdentityProvider) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *DenyAllPasswordIdentityProvider) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *GitHubIdentityProvider) DeepCopyInto(out *GitHubIdentityProvider) { + *out = *in + out.TypeMeta = in.TypeMeta + out.ClientSecret = in.ClientSecret + if in.Organizations != nil { + in, out := &in.Organizations, &out.Organizations + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.Teams != nil { + in, out := &in.Teams, &out.Teams + *out = make([]string, len(*in)) + copy(*out, *in) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new GitHubIdentityProvider. +func (in *GitHubIdentityProvider) DeepCopy() *GitHubIdentityProvider { + if in == nil { + return nil + } + out := new(GitHubIdentityProvider) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *GitHubIdentityProvider) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *GitLabIdentityProvider) DeepCopyInto(out *GitLabIdentityProvider) { + *out = *in + out.TypeMeta = in.TypeMeta + out.ClientSecret = in.ClientSecret + if in.Legacy != nil { + in, out := &in.Legacy, &out.Legacy + *out = new(bool) + **out = **in + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new GitLabIdentityProvider. +func (in *GitLabIdentityProvider) DeepCopy() *GitLabIdentityProvider { + if in == nil { + return nil + } + out := new(GitLabIdentityProvider) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *GitLabIdentityProvider) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *GoogleIdentityProvider) DeepCopyInto(out *GoogleIdentityProvider) { + *out = *in + out.TypeMeta = in.TypeMeta + out.ClientSecret = in.ClientSecret + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new GoogleIdentityProvider. 
+func (in *GoogleIdentityProvider) DeepCopy() *GoogleIdentityProvider { + if in == nil { + return nil + } + out := new(GoogleIdentityProvider) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *GoogleIdentityProvider) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *GrantConfig) DeepCopyInto(out *GrantConfig) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new GrantConfig. +func (in *GrantConfig) DeepCopy() *GrantConfig { + if in == nil { + return nil + } + out := new(GrantConfig) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *HTPasswdPasswordIdentityProvider) DeepCopyInto(out *HTPasswdPasswordIdentityProvider) { + *out = *in + out.TypeMeta = in.TypeMeta + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HTPasswdPasswordIdentityProvider. +func (in *HTPasswdPasswordIdentityProvider) DeepCopy() *HTPasswdPasswordIdentityProvider { + if in == nil { + return nil + } + out := new(HTPasswdPasswordIdentityProvider) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *HTPasswdPasswordIdentityProvider) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *IdentityProvider) DeepCopyInto(out *IdentityProvider) { + *out = *in + in.Provider.DeepCopyInto(&out.Provider) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IdentityProvider. +func (in *IdentityProvider) DeepCopy() *IdentityProvider { + if in == nil { + return nil + } + out := new(IdentityProvider) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *KeystonePasswordIdentityProvider) DeepCopyInto(out *KeystonePasswordIdentityProvider) { + *out = *in + out.TypeMeta = in.TypeMeta + out.RemoteConnectionInfo = in.RemoteConnectionInfo + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new KeystonePasswordIdentityProvider. +func (in *KeystonePasswordIdentityProvider) DeepCopy() *KeystonePasswordIdentityProvider { + if in == nil { + return nil + } + out := new(KeystonePasswordIdentityProvider) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *KeystonePasswordIdentityProvider) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *LDAPAttributeMapping) DeepCopyInto(out *LDAPAttributeMapping) { + *out = *in + if in.ID != nil { + in, out := &in.ID, &out.ID + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.PreferredUsername != nil { + in, out := &in.PreferredUsername, &out.PreferredUsername + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.Email != nil { + in, out := &in.Email, &out.Email + *out = make([]string, len(*in)) + copy(*out, *in) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LDAPAttributeMapping. +func (in *LDAPAttributeMapping) DeepCopy() *LDAPAttributeMapping { + if in == nil { + return nil + } + out := new(LDAPAttributeMapping) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *LDAPPasswordIdentityProvider) DeepCopyInto(out *LDAPPasswordIdentityProvider) { + *out = *in + out.TypeMeta = in.TypeMeta + out.BindPassword = in.BindPassword + in.Attributes.DeepCopyInto(&out.Attributes) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LDAPPasswordIdentityProvider. +func (in *LDAPPasswordIdentityProvider) DeepCopy() *LDAPPasswordIdentityProvider { + if in == nil { + return nil + } + out := new(LDAPPasswordIdentityProvider) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *LDAPPasswordIdentityProvider) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *OAuthConfig) DeepCopyInto(out *OAuthConfig) { + *out = *in + if in.MasterCA != nil { + in, out := &in.MasterCA, &out.MasterCA + *out = new(string) + **out = **in + } + if in.IdentityProviders != nil { + in, out := &in.IdentityProviders, &out.IdentityProviders + *out = make([]IdentityProvider, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + out.GrantConfig = in.GrantConfig + if in.SessionConfig != nil { + in, out := &in.SessionConfig, &out.SessionConfig + *out = new(SessionConfig) + **out = **in + } + in.TokenConfig.DeepCopyInto(&out.TokenConfig) + if in.Templates != nil { + in, out := &in.Templates, &out.Templates + *out = new(OAuthTemplates) + **out = **in + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OAuthConfig. +func (in *OAuthConfig) DeepCopy() *OAuthConfig { + if in == nil { + return nil + } + out := new(OAuthConfig) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *OAuthTemplates) DeepCopyInto(out *OAuthTemplates) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OAuthTemplates. +func (in *OAuthTemplates) DeepCopy() *OAuthTemplates { + if in == nil { + return nil + } + out := new(OAuthTemplates) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *OpenIDClaims) DeepCopyInto(out *OpenIDClaims) { + *out = *in + if in.ID != nil { + in, out := &in.ID, &out.ID + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.PreferredUsername != nil { + in, out := &in.PreferredUsername, &out.PreferredUsername + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.Email != nil { + in, out := &in.Email, &out.Email + *out = make([]string, len(*in)) + copy(*out, *in) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OpenIDClaims. +func (in *OpenIDClaims) DeepCopy() *OpenIDClaims { + if in == nil { + return nil + } + out := new(OpenIDClaims) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *OpenIDIdentityProvider) DeepCopyInto(out *OpenIDIdentityProvider) { + *out = *in + out.TypeMeta = in.TypeMeta + out.ClientSecret = in.ClientSecret + if in.ExtraScopes != nil { + in, out := &in.ExtraScopes, &out.ExtraScopes + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.ExtraAuthorizeParameters != nil { + in, out := &in.ExtraAuthorizeParameters, &out.ExtraAuthorizeParameters + *out = make(map[string]string, len(*in)) + for key, val := range *in { + (*out)[key] = val + } + } + out.URLs = in.URLs + in.Claims.DeepCopyInto(&out.Claims) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OpenIDIdentityProvider. +func (in *OpenIDIdentityProvider) DeepCopy() *OpenIDIdentityProvider { + if in == nil { + return nil + } + out := new(OpenIDIdentityProvider) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *OpenIDIdentityProvider) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *OpenIDURLs) DeepCopyInto(out *OpenIDURLs) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OpenIDURLs. +func (in *OpenIDURLs) DeepCopy() *OpenIDURLs { + if in == nil { + return nil + } + out := new(OpenIDURLs) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *OsinServerConfig) DeepCopyInto(out *OsinServerConfig) { + *out = *in + out.TypeMeta = in.TypeMeta + in.GenericAPIServerConfig.DeepCopyInto(&out.GenericAPIServerConfig) + in.OAuthConfig.DeepCopyInto(&out.OAuthConfig) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OsinServerConfig. +func (in *OsinServerConfig) DeepCopy() *OsinServerConfig { + if in == nil { + return nil + } + out := new(OsinServerConfig) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *OsinServerConfig) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *RequestHeaderIdentityProvider) DeepCopyInto(out *RequestHeaderIdentityProvider) { + *out = *in + out.TypeMeta = in.TypeMeta + if in.ClientCommonNames != nil { + in, out := &in.ClientCommonNames, &out.ClientCommonNames + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.Headers != nil { + in, out := &in.Headers, &out.Headers + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.PreferredUsernameHeaders != nil { + in, out := &in.PreferredUsernameHeaders, &out.PreferredUsernameHeaders + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.NameHeaders != nil { + in, out := &in.NameHeaders, &out.NameHeaders + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.EmailHeaders != nil { + in, out := &in.EmailHeaders, &out.EmailHeaders + *out = make([]string, len(*in)) + copy(*out, *in) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RequestHeaderIdentityProvider. +func (in *RequestHeaderIdentityProvider) DeepCopy() *RequestHeaderIdentityProvider { + if in == nil { + return nil + } + out := new(RequestHeaderIdentityProvider) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *RequestHeaderIdentityProvider) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SessionConfig) DeepCopyInto(out *SessionConfig) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SessionConfig. +func (in *SessionConfig) DeepCopy() *SessionConfig { + if in == nil { + return nil + } + out := new(SessionConfig) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SessionSecret) DeepCopyInto(out *SessionSecret) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SessionSecret. +func (in *SessionSecret) DeepCopy() *SessionSecret { + if in == nil { + return nil + } + out := new(SessionSecret) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SessionSecrets) DeepCopyInto(out *SessionSecrets) { + *out = *in + out.TypeMeta = in.TypeMeta + if in.Secrets != nil { + in, out := &in.Secrets, &out.Secrets + *out = make([]SessionSecret, len(*in)) + copy(*out, *in) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SessionSecrets. +func (in *SessionSecrets) DeepCopy() *SessionSecrets { + if in == nil { + return nil + } + out := new(SessionSecrets) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *SessionSecrets) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
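Note the two slice shapes the generator distinguishes: SessionSecrets.Secrets is copied with a single copy() because SessionSecret is a flat struct, whereas slices whose elements carry pointers get a per-index loop that re-allocates each pointee. A stand-in sketch of the second shape (the Provider type here is hypothetical):

package main

import "fmt"

// Provider is a hypothetical element type with a pointer field, the case
// where a plain copy() would leave both slices aliasing the same bool.
type Provider struct {
	Challenge *bool
}

// copyProviders re-allocates each element's pointee, as the generated
// per-index DeepCopyInto loops do.
func copyProviders(in []Provider) []Provider {
	out := make([]Provider, len(in))
	for i := range in {
		out[i] = in[i]
		if in[i].Challenge != nil {
			v := *in[i].Challenge
			out[i].Challenge = &v
		}
	}
	return out
}

func main() {
	t := true
	in := []Provider{{Challenge: &t}}
	out := copyProviders(in)
	*in[0].Challenge = false
	fmt.Println(*out[0].Challenge) // prints true: the copy kept its own bool
}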
+func (in *TokenConfig) DeepCopyInto(out *TokenConfig) { + *out = *in + if in.AccessTokenInactivityTimeoutSeconds != nil { + in, out := &in.AccessTokenInactivityTimeoutSeconds, &out.AccessTokenInactivityTimeoutSeconds + *out = new(int32) + **out = **in + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TokenConfig. +func (in *TokenConfig) DeepCopy() *TokenConfig { + if in == nil { + return nil + } + out := new(TokenConfig) + in.DeepCopyInto(out) + return out +} diff --git a/vendor/github.com/openshift/api/pkg/serialization/serialization.go b/vendor/github.com/openshift/api/pkg/serialization/serialization.go new file mode 100644 index 0000000000000000000000000000000000000000..70c8e7a9943b5b62f8378db2d512f465fdaf1732 --- /dev/null +++ b/vendor/github.com/openshift/api/pkg/serialization/serialization.go @@ -0,0 +1,45 @@ +package serialization + +import ( + "k8s.io/apimachinery/pkg/runtime" +) + +// DecodeNestedRawExtensionOrUnknown +func DecodeNestedRawExtensionOrUnknown(d runtime.Decoder, ext *runtime.RawExtension) { + if ext.Raw == nil || ext.Object != nil { + return + } + obj, gvk, err := d.Decode(ext.Raw, nil, nil) + if err != nil { + unk := &runtime.Unknown{Raw: ext.Raw} + if runtime.IsNotRegisteredError(err) { + if _, gvk, err := d.Decode(ext.Raw, nil, unk); err == nil { + unk.APIVersion = gvk.GroupVersion().String() + unk.Kind = gvk.Kind + ext.Object = unk + return + } + } + // TODO: record mime-type with the object + if gvk != nil { + unk.APIVersion = gvk.GroupVersion().String() + unk.Kind = gvk.Kind + } + obj = unk + } + ext.Object = obj +} + +// EncodeNestedRawExtension will encode the object in the RawExtension (if not nil) or +// return an error. +func EncodeNestedRawExtension(e runtime.Encoder, ext *runtime.RawExtension) error { + if ext.Raw != nil || ext.Object == nil { + return nil + } + data, err := runtime.Encode(e, ext.Object) + if err != nil { + return err + } + ext.Raw = data + return nil +} diff --git a/vendor/github.com/openshift/api/project/OWNERS b/vendor/github.com/openshift/api/project/OWNERS new file mode 100644 index 0000000000000000000000000000000000000000..9b1548f568af13bbb2a18f7c9f9475bfdb43af10 --- /dev/null +++ b/vendor/github.com/openshift/api/project/OWNERS @@ -0,0 +1,2 @@ +reviewers: + - mfojtik diff --git a/vendor/github.com/openshift/api/project/install.go b/vendor/github.com/openshift/api/project/install.go new file mode 100644 index 0000000000000000000000000000000000000000..c96c7aa265368ef3e1ab2a14ed2eb6ffb5896168 --- /dev/null +++ b/vendor/github.com/openshift/api/project/install.go @@ -0,0 +1,26 @@ +package project + +import ( + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" + + projectv1 "github.com/openshift/api/project/v1" +) + +const ( + GroupName = "project.openshift.io" +) + +var ( + schemeBuilder = runtime.NewSchemeBuilder(projectv1.Install) + // Install is a function which adds every version of this group to a scheme + Install = schemeBuilder.AddToScheme +) + +func Resource(resource string) schema.GroupResource { + return schema.GroupResource{Group: GroupName, Resource: resource} +} + +func Kind(kind string) schema.GroupKind { + return schema.GroupKind{Group: GroupName, Kind: kind} +} diff --git a/vendor/github.com/openshift/api/project/v1/doc.go b/vendor/github.com/openshift/api/project/v1/doc.go new file mode 100644 index 0000000000000000000000000000000000000000..5bbd9d5ea7a259223180261efe993bf3416303f8 --- /dev/null +++ 
b/vendor/github.com/openshift/api/project/v1/doc.go @@ -0,0 +1,8 @@ +// +k8s:deepcopy-gen=package,register +// +k8s:conversion-gen=github.com/openshift/origin/pkg/project/apis/project +// +k8s:defaulter-gen=TypeMeta +// +k8s:openapi-gen=true + +// +groupName=project.openshift.io +// Package v1 is the v1 version of the API. +package v1 diff --git a/vendor/github.com/openshift/api/project/v1/generated.pb.go b/vendor/github.com/openshift/api/project/v1/generated.pb.go new file mode 100644 index 0000000000000000000000000000000000000000..f6f1737b21aca456e20f1cff8a407e7da94e5451 --- /dev/null +++ b/vendor/github.com/openshift/api/project/v1/generated.pb.go @@ -0,0 +1,1346 @@ +// Code generated by protoc-gen-gogo. DO NOT EDIT. +// source: github.com/openshift/api/project/v1/generated.proto + +package v1 + +import ( + fmt "fmt" + + io "io" + + proto "github.com/gogo/protobuf/proto" + k8s_io_api_core_v1 "k8s.io/api/core/v1" + v11 "k8s.io/api/core/v1" + + math "math" + math_bits "math/bits" + reflect "reflect" + strings "strings" +) + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.GoGoProtoPackageIsVersion2 // please upgrade the proto package + +func (m *Project) Reset() { *m = Project{} } +func (*Project) ProtoMessage() {} +func (*Project) Descriptor() ([]byte, []int) { + return fileDescriptor_fbf46eaac05029bf, []int{0} +} +func (m *Project) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *Project) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *Project) XXX_Merge(src proto.Message) { + xxx_messageInfo_Project.Merge(m, src) +} +func (m *Project) XXX_Size() int { + return m.Size() +} +func (m *Project) XXX_DiscardUnknown() { + xxx_messageInfo_Project.DiscardUnknown(m) +} + +var xxx_messageInfo_Project proto.InternalMessageInfo + +func (m *ProjectList) Reset() { *m = ProjectList{} } +func (*ProjectList) ProtoMessage() {} +func (*ProjectList) Descriptor() ([]byte, []int) { + return fileDescriptor_fbf46eaac05029bf, []int{1} +} +func (m *ProjectList) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ProjectList) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *ProjectList) XXX_Merge(src proto.Message) { + xxx_messageInfo_ProjectList.Merge(m, src) +} +func (m *ProjectList) XXX_Size() int { + return m.Size() +} +func (m *ProjectList) XXX_DiscardUnknown() { + xxx_messageInfo_ProjectList.DiscardUnknown(m) +} + +var xxx_messageInfo_ProjectList proto.InternalMessageInfo + +func (m *ProjectRequest) Reset() { *m = ProjectRequest{} } +func (*ProjectRequest) ProtoMessage() {} +func (*ProjectRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_fbf46eaac05029bf, []int{2} +} +func (m *ProjectRequest) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ProjectRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return 
b[:n], nil +} +func (m *ProjectRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_ProjectRequest.Merge(m, src) +} +func (m *ProjectRequest) XXX_Size() int { + return m.Size() +} +func (m *ProjectRequest) XXX_DiscardUnknown() { + xxx_messageInfo_ProjectRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_ProjectRequest proto.InternalMessageInfo + +func (m *ProjectSpec) Reset() { *m = ProjectSpec{} } +func (*ProjectSpec) ProtoMessage() {} +func (*ProjectSpec) Descriptor() ([]byte, []int) { + return fileDescriptor_fbf46eaac05029bf, []int{3} +} +func (m *ProjectSpec) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ProjectSpec) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *ProjectSpec) XXX_Merge(src proto.Message) { + xxx_messageInfo_ProjectSpec.Merge(m, src) +} +func (m *ProjectSpec) XXX_Size() int { + return m.Size() +} +func (m *ProjectSpec) XXX_DiscardUnknown() { + xxx_messageInfo_ProjectSpec.DiscardUnknown(m) +} + +var xxx_messageInfo_ProjectSpec proto.InternalMessageInfo + +func (m *ProjectStatus) Reset() { *m = ProjectStatus{} } +func (*ProjectStatus) ProtoMessage() {} +func (*ProjectStatus) Descriptor() ([]byte, []int) { + return fileDescriptor_fbf46eaac05029bf, []int{4} +} +func (m *ProjectStatus) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ProjectStatus) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *ProjectStatus) XXX_Merge(src proto.Message) { + xxx_messageInfo_ProjectStatus.Merge(m, src) +} +func (m *ProjectStatus) XXX_Size() int { + return m.Size() +} +func (m *ProjectStatus) XXX_DiscardUnknown() { + xxx_messageInfo_ProjectStatus.DiscardUnknown(m) +} + +var xxx_messageInfo_ProjectStatus proto.InternalMessageInfo + +func init() { + proto.RegisterType((*Project)(nil), "github.com.openshift.api.project.v1.Project") + proto.RegisterType((*ProjectList)(nil), "github.com.openshift.api.project.v1.ProjectList") + proto.RegisterType((*ProjectRequest)(nil), "github.com.openshift.api.project.v1.ProjectRequest") + proto.RegisterType((*ProjectSpec)(nil), "github.com.openshift.api.project.v1.ProjectSpec") + proto.RegisterType((*ProjectStatus)(nil), "github.com.openshift.api.project.v1.ProjectStatus") +} + +func init() { + proto.RegisterFile("github.com/openshift/api/project/v1/generated.proto", fileDescriptor_fbf46eaac05029bf) +} + +var fileDescriptor_fbf46eaac05029bf = []byte{ + // 570 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xbc, 0x93, 0x3d, 0x8f, 0xd3, 0x30, + 0x18, 0xc7, 0x9b, 0xf6, 0x7a, 0x5c, 0x5d, 0xee, 0x84, 0xc2, 0x52, 0x75, 0x48, 0x4b, 0x90, 0x50, + 0x07, 0x70, 0x68, 0x79, 0x11, 0x73, 0x40, 0x08, 0x24, 0x5e, 0x0e, 0xb3, 0x55, 0x0c, 0xb8, 0xa9, + 0x9b, 0x9a, 0x5e, 0x62, 0x13, 0xbb, 0x95, 0x8e, 0x89, 0x8f, 0xc0, 0xce, 0xe7, 0x60, 0x65, 0xee, + 0x78, 0xe3, 0x4d, 0xd5, 0x35, 0x7c, 0x8b, 0x9b, 0x90, 0x1d, 0x37, 0x09, 0x5c, 0x91, 0xee, 0x16, + 0xb6, 0xfa, 0xc9, 0xff, 0xf7, 0xb3, 0xfd, 0x3c, 0x2e, 0x78, 0x10, 0x52, 0x39, 0x9d, 0x8f, 0x60, + 0xc0, 0x22, 0x8f, 0x71, 0x12, 0x8b, 0x29, 0x9d, 0x48, 0x0f, 0x73, 0xea, 0xf1, 0x84, 0x7d, 0x22, + 0x81, 0xf4, 0x16, 0x7d, 0x2f, 0x24, 0x31, 0x49, 0xb0, 0x24, 0x63, 0xc8, 0x13, 0x26, 0x99, 0x7d, + 0xbb, 0x80, 0x60, 0x0e, 0x41, 0xcc, 0x29, 0x34, 0x10, 0x5c, 0xf4, 0xdb, 
0xf7, 0x4a, 0xe6, 0x90, + 0x85, 0xcc, 0xd3, 0xec, 0x68, 0x3e, 0xd1, 0x2b, 0xbd, 0xd0, 0xbf, 0x32, 0x67, 0xdb, 0x9d, 0x3d, + 0x11, 0x90, 0x32, 0xbd, 0x75, 0xc0, 0x12, 0xb2, 0x65, 0xdf, 0xf6, 0xc3, 0x22, 0x13, 0xe1, 0x60, + 0x4a, 0x63, 0x92, 0x1c, 0x7b, 0x7c, 0x16, 0xaa, 0x82, 0xf0, 0x22, 0x22, 0xf1, 0x36, 0xea, 0xf1, + 0xbf, 0xa8, 0x64, 0x1e, 0x4b, 0x1a, 0x11, 0x4f, 0x04, 0x53, 0x12, 0xe1, 0xbf, 0x39, 0xf7, 0x7b, + 0x15, 0x5c, 0x3b, 0xcc, 0xee, 0x63, 0x7f, 0x04, 0x7b, 0x4a, 0x3f, 0xc6, 0x12, 0xb7, 0xac, 0xae, + 0xd5, 0x6b, 0x0e, 0xee, 0xc3, 0x4c, 0x0b, 0xcb, 0x5a, 0xc8, 0x67, 0xa1, 0x2a, 0x08, 0xa8, 0xd2, + 0x70, 0xd1, 0x87, 0x6f, 0x47, 0x8a, 0x7f, 0x4d, 0x24, 0xf6, 0xed, 0xe5, 0xaa, 0x53, 0x49, 0x57, + 0x1d, 0x50, 0xd4, 0x50, 0x6e, 0xb5, 0x11, 0xd8, 0x11, 0x9c, 0x04, 0xad, 0xaa, 0xb1, 0x5f, 0xa2, + 0xc5, 0xd0, 0x9c, 0xee, 0x3d, 0x27, 0x81, 0x7f, 0xdd, 0xd8, 0x77, 0xd4, 0x0a, 0x69, 0x97, 0x3d, + 0x04, 0xbb, 0x42, 0x62, 0x39, 0x17, 0xad, 0x9a, 0xb6, 0x0e, 0xae, 0x64, 0xd5, 0xa4, 0x7f, 0x60, + 0xbc, 0xbb, 0xd9, 0x1a, 0x19, 0xa3, 0xfb, 0xd3, 0x02, 0x4d, 0x93, 0x7c, 0x45, 0x85, 0xb4, 0x3f, + 0x5c, 0xe8, 0x10, 0xbc, 0x5c, 0x87, 0x14, 0xad, 0xfb, 0x73, 0xc3, 0xec, 0xb4, 0xb7, 0xa9, 0x94, + 0xba, 0xf3, 0x0e, 0xd4, 0xa9, 0x24, 0x91, 0x68, 0x55, 0xbb, 0xb5, 0x5e, 0x73, 0x70, 0xf7, 0x2a, + 0x17, 0xf1, 0xf7, 0x8d, 0xb8, 0xfe, 0x52, 0x29, 0x50, 0x66, 0x72, 0xcf, 0x2c, 0x70, 0x60, 0x12, + 0x88, 0x7c, 0x9e, 0x13, 0xf1, 0x3f, 0xa6, 0xfc, 0x08, 0x34, 0xc7, 0x54, 0xf0, 0x23, 0x7c, 0xfc, + 0x06, 0x47, 0x44, 0x0f, 0xbb, 0xe1, 0xdf, 0x34, 0x48, 0xf3, 0x59, 0xf1, 0x09, 0x95, 0x73, 0x1a, + 0x23, 0x22, 0x48, 0x28, 0x97, 0x94, 0xc5, 0x7a, 0x9a, 0x65, 0xac, 0xf8, 0x84, 0xca, 0x39, 0x17, + 0xe7, 0x23, 0x52, 0x8f, 0xc2, 0x46, 0x00, 0x4c, 0x68, 0x8c, 0x8f, 0xe8, 0x17, 0x92, 0x88, 0x96, + 0xd5, 0xad, 0xf5, 0x1a, 0xfe, 0x40, 0x1d, 0xf5, 0x79, 0x5e, 0x3d, 0x5f, 0x75, 0xba, 0x17, 0xff, + 0x88, 0x30, 0x0f, 0xe8, 0xa3, 0x95, 0x2c, 0xee, 0x0f, 0x0b, 0xec, 0xff, 0xf1, 0x60, 0xec, 0x17, + 0xa0, 0xce, 0xa7, 0x58, 0x10, 0xdd, 0xc1, 0x86, 0x3f, 0xd8, 0x34, 0xff, 0x50, 0x15, 0xcf, 0x57, + 0x9d, 0x5b, 0x5b, 0xfc, 0x4a, 0x2b, 0x38, 0x0e, 0x88, 0x0e, 0xa1, 0x4c, 0x60, 0x0f, 0x01, 0x08, + 0x58, 0x3c, 0xa6, 0xea, 0x2e, 0x9b, 0xc9, 0xdf, 0x29, 0x0d, 0x04, 0x2a, 0x1c, 0x96, 0xf1, 0xa7, + 0x9b, 0x78, 0x31, 0x86, 0xbc, 0x24, 0x50, 0xc9, 0xe6, 0xf7, 0x96, 0x6b, 0xa7, 0x72, 0xb2, 0x76, + 0x2a, 0xa7, 0x6b, 0xa7, 0xf2, 0x35, 0x75, 0xac, 0x65, 0xea, 0x58, 0x27, 0xa9, 0x63, 0x9d, 0xa6, + 0x8e, 0x75, 0x96, 0x3a, 0xd6, 0xb7, 0x5f, 0x4e, 0x65, 0x58, 0x5d, 0xf4, 0x7f, 0x07, 0x00, 0x00, + 0xff, 0xff, 0x0a, 0xd0, 0xf2, 0xe0, 0x22, 0x05, 0x00, 0x00, +} + +func (m *Project) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Project) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *Project) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + { + size, err := m.Status.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a + { + size, err := m.Spec.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + { + size, err := m.ObjectMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + 
return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *ProjectList) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ProjectList) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ProjectList) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.Items) > 0 { + for iNdEx := len(m.Items) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Items[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + } + { + size, err := m.ListMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *ProjectRequest) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ProjectRequest) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ProjectRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + i -= len(m.Description) + copy(dAtA[i:], m.Description) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Description))) + i-- + dAtA[i] = 0x1a + i -= len(m.DisplayName) + copy(dAtA[i:], m.DisplayName) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.DisplayName))) + i-- + dAtA[i] = 0x12 + { + size, err := m.ObjectMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *ProjectSpec) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ProjectSpec) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ProjectSpec) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.Finalizers) > 0 { + for iNdEx := len(m.Finalizers) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.Finalizers[iNdEx]) + copy(dAtA[i:], m.Finalizers[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Finalizers[iNdEx]))) + i-- + dAtA[i] = 0xa + } + } + return len(dAtA) - i, nil +} + +func (m *ProjectStatus) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ProjectStatus) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ProjectStatus) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.Conditions) > 0 { + for iNdEx := len(m.Conditions) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Conditions[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + 
i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + } + i -= len(m.Phase) + copy(dAtA[i:], m.Phase) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Phase))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func encodeVarintGenerated(dAtA []byte, offset int, v uint64) int { + offset -= sovGenerated(v) + base := offset + for v >= 1<<7 { + dAtA[offset] = uint8(v&0x7f | 0x80) + v >>= 7 + offset++ + } + dAtA[offset] = uint8(v) + return base +} +func (m *Project) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.ObjectMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = m.Spec.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = m.Status.Size() + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *ProjectList) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.ListMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + if len(m.Items) > 0 { + for _, e := range m.Items { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m *ProjectRequest) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.ObjectMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.DisplayName) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Description) + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *ProjectSpec) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.Finalizers) > 0 { + for _, s := range m.Finalizers { + l = len(s) + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m *ProjectStatus) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Phase) + n += 1 + l + sovGenerated(uint64(l)) + if len(m.Conditions) > 0 { + for _, e := range m.Conditions { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func sovGenerated(x uint64) (n int) { + return (math_bits.Len64(x|1) + 6) / 7 +} +func sozGenerated(x uint64) (n int) { + return sovGenerated(uint64((x << 1) ^ uint64((int64(x) >> 63)))) +} +func (this *Project) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&Project{`, + `ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v1.ObjectMeta", 1), `&`, ``, 1) + `,`, + `Spec:` + strings.Replace(strings.Replace(this.Spec.String(), "ProjectSpec", "ProjectSpec", 1), `&`, ``, 1) + `,`, + `Status:` + strings.Replace(strings.Replace(this.Status.String(), "ProjectStatus", "ProjectStatus", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *ProjectList) String() string { + if this == nil { + return "nil" + } + repeatedStringForItems := "[]Project{" + for _, f := range this.Items { + repeatedStringForItems += strings.Replace(strings.Replace(f.String(), "Project", "Project", 1), `&`, ``, 1) + "," + } + repeatedStringForItems += "}" + s := strings.Join([]string{`&ProjectList{`, + `ListMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ListMeta), "ListMeta", "v1.ListMeta", 1), `&`, ``, 1) + `,`, + `Items:` + repeatedStringForItems + `,`, + `}`, + }, "") + return s +} +func (this *ProjectRequest) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&ProjectRequest{`, + `ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v1.ObjectMeta", 1), `&`, ``, 1) + `,`, + `DisplayName:` + fmt.Sprintf("%v", this.DisplayName) + `,`, + `Description:` + fmt.Sprintf("%v", 
this.Description) + `,`, + `}`, + }, "") + return s +} +func (this *ProjectSpec) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&ProjectSpec{`, + `Finalizers:` + fmt.Sprintf("%v", this.Finalizers) + `,`, + `}`, + }, "") + return s +} +func (this *ProjectStatus) String() string { + if this == nil { + return "nil" + } + repeatedStringForConditions := "[]NamespaceCondition{" + for _, f := range this.Conditions { + repeatedStringForConditions += fmt.Sprintf("%v", f) + "," + } + repeatedStringForConditions += "}" + s := strings.Join([]string{`&ProjectStatus{`, + `Phase:` + fmt.Sprintf("%v", this.Phase) + `,`, + `Conditions:` + repeatedStringForConditions + `,`, + `}`, + }, "") + return s +} +func valueToStringGenerated(v interface{}) string { + rv := reflect.ValueOf(v) + if rv.IsNil() { + return "nil" + } + pv := reflect.Indirect(rv).Interface() + return fmt.Sprintf("*%v", pv) +} +func (m *Project) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Project: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Project: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ObjectMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Spec", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Spec.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Status", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := 
m.Status.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ProjectList) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ProjectList: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ProjectList: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ListMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ListMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Items = append(m.Items, Project{}) + if err := m.Items[len(m.Items)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ProjectRequest) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ProjectRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: 
ProjectRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ObjectMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field DisplayName", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.DisplayName = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Description", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Description = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ProjectSpec) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ProjectSpec: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ProjectSpec: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Finalizers", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + 
stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Finalizers = append(m.Finalizers, k8s_io_api_core_v1.FinalizerName(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ProjectStatus) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ProjectStatus: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ProjectStatus: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Phase", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Phase = k8s_io_api_core_v1.NamespacePhase(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Conditions", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Conditions = append(m.Conditions, v11.NamespaceCondition{}) + if err := m.Conditions[len(m.Conditions)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func skipGenerated(dAtA []byte) (n int, err error) { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + 
return 0, ErrIntOverflowGenerated + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + wireType := int(wire & 0x7) + switch wireType { + case 0: + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowGenerated + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + iNdEx++ + if dAtA[iNdEx-1] < 0x80 { + break + } + } + return iNdEx, nil + case 1: + iNdEx += 8 + return iNdEx, nil + case 2: + var length int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowGenerated + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + length |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if length < 0 { + return 0, ErrInvalidLengthGenerated + } + iNdEx += length + if iNdEx < 0 { + return 0, ErrInvalidLengthGenerated + } + return iNdEx, nil + case 3: + for { + var innerWire uint64 + var start int = iNdEx + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowGenerated + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + innerWire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + innerWireType := int(innerWire & 0x7) + if innerWireType == 4 { + break + } + next, err := skipGenerated(dAtA[start:]) + if err != nil { + return 0, err + } + iNdEx = start + next + if iNdEx < 0 { + return 0, ErrInvalidLengthGenerated + } + } + return iNdEx, nil + case 4: + return iNdEx, nil + case 5: + iNdEx += 4 + return iNdEx, nil + default: + return 0, fmt.Errorf("proto: illegal wireType %d", wireType) + } + } + panic("unreachable") +} + +var ( + ErrInvalidLengthGenerated = fmt.Errorf("proto: negative length found during unmarshaling") + ErrIntOverflowGenerated = fmt.Errorf("proto: integer overflow") +) diff --git a/vendor/github.com/openshift/api/project/v1/generated.proto b/vendor/github.com/openshift/api/project/v1/generated.proto new file mode 100644 index 0000000000000000000000000000000000000000..2baaf6a031bd08e572c600b62022974467696c9a --- /dev/null +++ b/vendor/github.com/openshift/api/project/v1/generated.proto @@ -0,0 +1,75 @@ + +// This file was autogenerated by go-to-protobuf. Do not edit it manually! + +syntax = 'proto2'; + +package github.com.openshift.api.project.v1; + +import "k8s.io/api/core/v1/generated.proto"; +import "k8s.io/apimachinery/pkg/apis/meta/v1/generated.proto"; +import "k8s.io/apimachinery/pkg/runtime/schema/generated.proto"; + +// Package-wide variables from generator "generated". +option go_package = "v1"; + +// Projects are the unit of isolation and collaboration in OpenShift. A project has one or more members, +// a quota on the resources that the project may consume, and the security controls on the resources in +// the project. Within a project, members may have different roles - project administrators can set +// membership, editors can create and manage the resources, and viewers can see but not access running +// containers. In a normal cluster project administrators are not able to alter their quotas - that is +// restricted to cluster administrators. +// +// Listing or watching projects will return only projects the user has the reader role on. +// +// An OpenShift project is an alternative representation of a Kubernetes namespace. Projects are exposed +// as editable to end users while namespaces are not. 
Direct creation of a project is typically restricted +// to administrators, while end users should use the requestproject resource. +message Project { + optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; + + // Spec defines the behavior of the Namespace. + optional ProjectSpec spec = 2; + + // Status describes the current status of a Namespace + // +optional + optional ProjectStatus status = 3; +} + +// ProjectList is a list of Project objects. +message ProjectList { + optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; + + // Items is the list of projects + repeated Project items = 2; +} + +// ProjecRequest is the set of options necessary to fully qualify a project request +message ProjectRequest { + optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; + + // DisplayName is the display name to apply to a project + optional string displayName = 2; + + // Description is the description to apply to a project + optional string description = 3; +} + +// ProjectSpec describes the attributes on a Project +message ProjectSpec { + // Finalizers is an opaque list of values that must be empty to permanently remove object from storage + repeated string finalizers = 1; +} + +// ProjectStatus is information about the current status of a Project +message ProjectStatus { + // Phase is the current lifecycle phase of the project + // +optional + optional string phase = 1; + + // Represents the latest available observations of the project current state. + // +optional + // +patchMergeKey=type + // +patchStrategy=merge + repeated k8s.io.api.core.v1.NamespaceCondition conditions = 2; +} + diff --git a/vendor/github.com/openshift/api/project/v1/legacy.go b/vendor/github.com/openshift/api/project/v1/legacy.go new file mode 100644 index 0000000000000000000000000000000000000000..186f905f3a8ebdd3b96f2ddb9a4ec7a8e3078629 --- /dev/null +++ b/vendor/github.com/openshift/api/project/v1/legacy.go @@ -0,0 +1,23 @@ +package v1 + +import ( + corev1 "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" +) + +var ( + legacyGroupVersion = schema.GroupVersion{Group: "", Version: "v1"} + legacySchemeBuilder = runtime.NewSchemeBuilder(addLegacyKnownTypes, corev1.AddToScheme) + DeprecatedInstallWithoutGroup = legacySchemeBuilder.AddToScheme +) + +func addLegacyKnownTypes(scheme *runtime.Scheme) error { + types := []runtime.Object{ + &Project{}, + &ProjectList{}, + &ProjectRequest{}, + } + scheme.AddKnownTypes(legacyGroupVersion, types...) 
+ return nil +} diff --git a/vendor/github.com/openshift/api/project/v1/register.go b/vendor/github.com/openshift/api/project/v1/register.go new file mode 100644 index 0000000000000000000000000000000000000000..e471716ce8e2aab27e92c1c2c1704fd94e8bc0f8 --- /dev/null +++ b/vendor/github.com/openshift/api/project/v1/register.go @@ -0,0 +1,40 @@ +package v1 + +import ( + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" +) + +var ( + GroupName = "project.openshift.io" + GroupVersion = schema.GroupVersion{Group: GroupName, Version: "v1"} + schemeBuilder = runtime.NewSchemeBuilder(addKnownTypes, corev1.AddToScheme) + // Install is a function which adds this version to a scheme + Install = schemeBuilder.AddToScheme + + // SchemeGroupVersion generated code relies on this name + // Deprecated + SchemeGroupVersion = GroupVersion + // AddToScheme exists solely to keep the old generators creating valid code + // DEPRECATED + AddToScheme = schemeBuilder.AddToScheme +) + +// Resource generated code relies on this being here, but it logically belongs to the group +// DEPRECATED +func Resource(resource string) schema.GroupResource { + return schema.GroupResource{Group: GroupName, Resource: resource} +} + +// Adds the list of known types to api.Scheme. +func addKnownTypes(scheme *runtime.Scheme) error { + scheme.AddKnownTypes(GroupVersion, + &Project{}, + &ProjectList{}, + &ProjectRequest{}, + ) + metav1.AddToGroupVersion(scheme, GroupVersion) + return nil +} diff --git a/vendor/github.com/openshift/api/project/v1/types.go b/vendor/github.com/openshift/api/project/v1/types.go new file mode 100644 index 0000000000000000000000000000000000000000..dea150f12ff05acb7ee0c872945afebcc1f83280 --- /dev/null +++ b/vendor/github.com/openshift/api/project/v1/types.go @@ -0,0 +1,93 @@ +package v1 + +import ( + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// ProjectList is a list of Project objects. +type ProjectList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` + + // Items is the list of projects + Items []Project `json:"items" protobuf:"bytes,2,rep,name=items"` +} + +const ( + // These are internal finalizer values to Origin + FinalizerOrigin corev1.FinalizerName = "openshift.io/origin" + // ProjectNodeSelector is an annotation that holds the node selector; + // the node selector annotation determines which nodes will have pods from this project scheduled to them + ProjectNodeSelector = "openshift.io/node-selector" + + // ProjectRequesterAnnotation is the username that requested a given project. Its not guaranteed to be present, + // but it is set by the default project template. 
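register.go above gives consumers a one-call way to make these types decodable, while legacy.go keeps the pre-group ""/v1 registration available as DeprecatedInstallWithoutGroup. A minimal sketch of typical use, assuming the vendored import paths above resolve in this module:

package main

import (
	"fmt"

	projectv1 "github.com/openshift/api/project/v1"
	"k8s.io/apimachinery/pkg/runtime"
)

func main() {
	scheme := runtime.NewScheme()
	// Install registers Project, ProjectList, and ProjectRequest under
	// project.openshift.io/v1 (plus the corev1 types the builder pulls in).
	if err := projectv1.Install(scheme); err != nil {
		panic(err)
	}
	gvks, _, err := scheme.ObjectKinds(&projectv1.Project{})
	if err != nil {
		panic(err)
	}
	fmt.Println(gvks[0]) // project.openshift.io/v1, Kind=Project
}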
+ ProjectRequesterAnnotation = "openshift.io/requester" +) + +// ProjectSpec describes the attributes on a Project +type ProjectSpec struct { + // Finalizers is an opaque list of values that must be empty to permanently remove object from storage + Finalizers []corev1.FinalizerName `json:"finalizers,omitempty" protobuf:"bytes,1,rep,name=finalizers,casttype=k8s.io/api/core/v1.FinalizerName"` +} + +// ProjectStatus is information about the current status of a Project +type ProjectStatus struct { + // Phase is the current lifecycle phase of the project + // +optional + Phase corev1.NamespacePhase `json:"phase,omitempty" protobuf:"bytes,1,opt,name=phase,casttype=k8s.io/api/core/v1.NamespacePhase"` + + // Represents the latest available observations of the project current state. + // +optional + // +patchMergeKey=type + // +patchStrategy=merge + Conditions []corev1.NamespaceCondition `json:"conditions,omitempty" patchStrategy:"merge" patchMergeKey:"type" protobuf:"bytes,2,rep,name=conditions"` +} + +// +genclient +// +genclient:nonNamespaced +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// Projects are the unit of isolation and collaboration in OpenShift. A project has one or more members, +// a quota on the resources that the project may consume, and the security controls on the resources in +// the project. Within a project, members may have different roles - project administrators can set +// membership, editors can create and manage the resources, and viewers can see but not access running +// containers. In a normal cluster project administrators are not able to alter their quotas - that is +// restricted to cluster administrators. +// +// Listing or watching projects will return only projects the user has the reader role on. +// +// An OpenShift project is an alternative representation of a Kubernetes namespace. Projects are exposed +// as editable to end users while namespaces are not. Direct creation of a project is typically restricted +// to administrators, while end users should use the requestproject resource. +type Project struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` + + // Spec defines the behavior of the Namespace. 
+ Spec ProjectSpec `json:"spec,omitempty" protobuf:"bytes,2,opt,name=spec"` + + // Status describes the current status of a Namespace + // +optional + Status ProjectStatus `json:"status,omitempty" protobuf:"bytes,3,opt,name=status"` +} + +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object +// +genclient +// +genclient:nonNamespaced +// +genclient:skipVerbs=get,list,create,update,patch,delete,deleteCollection,watch +// +genclient:method=Create,verb=create,result=Project + +// ProjecRequest is the set of options necessary to fully qualify a project request +type ProjectRequest struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` + + // DisplayName is the display name to apply to a project + DisplayName string `json:"displayName,omitempty" protobuf:"bytes,2,opt,name=displayName"` + // Description is the description to apply to a project + Description string `json:"description,omitempty" protobuf:"bytes,3,opt,name=description"` +} diff --git a/vendor/github.com/openshift/api/project/v1/zz_generated.deepcopy.go b/vendor/github.com/openshift/api/project/v1/zz_generated.deepcopy.go new file mode 100644 index 0000000000000000000000000000000000000000..763383030f480f817b8c8c34be68e6d87a559d39 --- /dev/null +++ b/vendor/github.com/openshift/api/project/v1/zz_generated.deepcopy.go @@ -0,0 +1,141 @@ +// +build !ignore_autogenerated + +// Code generated by deepcopy-gen. DO NOT EDIT. + +package v1 + +import ( + corev1 "k8s.io/api/core/v1" + runtime "k8s.io/apimachinery/pkg/runtime" +) + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *Project) DeepCopyInto(out *Project) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Project. +func (in *Project) DeepCopy() *Project { + if in == nil { + return nil + } + out := new(Project) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *Project) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ProjectList) DeepCopyInto(out *ProjectList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]Project, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ProjectList. +func (in *ProjectList) DeepCopy() *ProjectList { + if in == nil { + return nil + } + out := new(ProjectList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *ProjectList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
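With types.go and the marshalling from generated.pb.go both in place, a Project can round-trip through its protobuf wire form using the generated Marshal and Unmarshal methods shown earlier. A minimal sketch, again assuming the vendored module path resolves:

package main

import (
	"fmt"

	projectv1 "github.com/openshift/api/project/v1"
)

func main() {
	p := &projectv1.Project{}
	p.Name = "demo" // promoted from the embedded metav1.ObjectMeta

	data, err := p.Marshal() // gogo-generated protobuf encoding
	if err != nil {
		panic(err)
	}

	out := &projectv1.Project{}
	if err := out.Unmarshal(data); err != nil {
		panic(err)
	}
	fmt.Println(out.Name) // "demo"
}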
+func (in *ProjectRequest) DeepCopyInto(out *ProjectRequest) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ProjectRequest. +func (in *ProjectRequest) DeepCopy() *ProjectRequest { + if in == nil { + return nil + } + out := new(ProjectRequest) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *ProjectRequest) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ProjectSpec) DeepCopyInto(out *ProjectSpec) { + *out = *in + if in.Finalizers != nil { + in, out := &in.Finalizers, &out.Finalizers + *out = make([]corev1.FinalizerName, len(*in)) + copy(*out, *in) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ProjectSpec. +func (in *ProjectSpec) DeepCopy() *ProjectSpec { + if in == nil { + return nil + } + out := new(ProjectSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ProjectStatus) DeepCopyInto(out *ProjectStatus) { + *out = *in + if in.Conditions != nil { + in, out := &in.Conditions, &out.Conditions + *out = make([]corev1.NamespaceCondition, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ProjectStatus. +func (in *ProjectStatus) DeepCopy() *ProjectStatus { + if in == nil { + return nil + } + out := new(ProjectStatus) + in.DeepCopyInto(out) + return out +} diff --git a/vendor/github.com/openshift/api/project/v1/zz_generated.swagger_doc_generated.go b/vendor/github.com/openshift/api/project/v1/zz_generated.swagger_doc_generated.go new file mode 100644 index 0000000000000000000000000000000000000000..080f2677ae10bd4c5fd0320c3de377473865a75f --- /dev/null +++ b/vendor/github.com/openshift/api/project/v1/zz_generated.swagger_doc_generated.go @@ -0,0 +1,62 @@ +package v1 + +// This file contains a collection of methods that can be used from go-restful to +// generate Swagger API documentation for its models. Please read this PR for more +// information on the implementation: https://github.com/emicklei/go-restful/pull/215 +// +// TODOs are ignored from the parser (e.g. TODO(andronat):... || TODO:...) if and only if +// they are on one line! For multiple line or blocks that you want to ignore use ---. +// Any context after a --- is ignored. +// +// Those methods can be generated by using hack/update-swagger-docs.sh + +// AUTO-GENERATED FUNCTIONS START HERE +var map_Project = map[string]string{ + "": "Projects are the unit of isolation and collaboration in OpenShift. A project has one or more members, a quota on the resources that the project may consume, and the security controls on the resources in the project. Within a project, members may have different roles - project administrators can set membership, editors can create and manage the resources, and viewers can see but not access running containers. 
In a normal cluster project administrators are not able to alter their quotas - that is restricted to cluster administrators.\n\nListing or watching projects will return only projects the user has the reader role on.\n\nAn OpenShift project is an alternative representation of a Kubernetes namespace. Projects are exposed as editable to end users while namespaces are not. Direct creation of a project is typically restricted to administrators, while end users should use the requestproject resource.", + "spec": "Spec defines the behavior of the Namespace.", + "status": "Status describes the current status of a Namespace", +} + +func (Project) SwaggerDoc() map[string]string { + return map_Project +} + +var map_ProjectList = map[string]string{ + "": "ProjectList is a list of Project objects.", + "items": "Items is the list of projects", +} + +func (ProjectList) SwaggerDoc() map[string]string { + return map_ProjectList +} + +var map_ProjectRequest = map[string]string{ + "": "ProjecRequest is the set of options necessary to fully qualify a project request", + "displayName": "DisplayName is the display name to apply to a project", + "description": "Description is the description to apply to a project", +} + +func (ProjectRequest) SwaggerDoc() map[string]string { + return map_ProjectRequest +} + +var map_ProjectSpec = map[string]string{ + "": "ProjectSpec describes the attributes on a Project", + "finalizers": "Finalizers is an opaque list of values that must be empty to permanently remove object from storage", +} + +func (ProjectSpec) SwaggerDoc() map[string]string { + return map_ProjectSpec +} + +var map_ProjectStatus = map[string]string{ + "": "ProjectStatus is information about the current status of a Project", + "phase": "Phase is the current lifecycle phase of the project", + "conditions": "Represents the latest available observations of the project current state.", +} + +func (ProjectStatus) SwaggerDoc() map[string]string { + return map_ProjectStatus +} + +// AUTO-GENERATED FUNCTIONS END HERE diff --git a/vendor/github.com/openshift/api/quota/OWNERS b/vendor/github.com/openshift/api/quota/OWNERS new file mode 100644 index 0000000000000000000000000000000000000000..75dbd7b5664f7e5c252d18f136aa6c93d009c176 --- /dev/null +++ b/vendor/github.com/openshift/api/quota/OWNERS @@ -0,0 +1,3 @@ +reviewers: + - deads2k + - mfojtik diff --git a/vendor/github.com/openshift/api/quota/install.go b/vendor/github.com/openshift/api/quota/install.go new file mode 100644 index 0000000000000000000000000000000000000000..2a88e7d0a4f7ffc5f0dd14932bf25d358465c0db --- /dev/null +++ b/vendor/github.com/openshift/api/quota/install.go @@ -0,0 +1,26 @@ +package quota + +import ( + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" + + quotav1 "github.com/openshift/api/quota/v1" +) + +const ( + GroupName = "quota.openshift.io" +) + +var ( + schemeBuilder = runtime.NewSchemeBuilder(quotav1.Install) + // Install is a function which adds every version of this group to a scheme + Install = schemeBuilder.AddToScheme +) + +func Resource(resource string) schema.GroupResource { + return schema.GroupResource{Group: GroupName, Resource: resource} +} + +func Kind(kind string) schema.GroupKind { + return schema.GroupKind{Group: GroupName, Kind: kind} +} diff --git a/vendor/github.com/openshift/api/quota/v1/0000_03_quota-openshift_01_clusterresourcequota.crd.yaml b/vendor/github.com/openshift/api/quota/v1/0000_03_quota-openshift_01_clusterresourcequota.crd.yaml new file mode 100644 index 
0000000000000000000000000000000000000000..8c425675c889553a92ef15840a8706955b8718ad --- /dev/null +++ b/vendor/github.com/openshift/api/quota/v1/0000_03_quota-openshift_01_clusterresourcequota.crd.yaml @@ -0,0 +1,221 @@ +apiVersion: apiextensions.k8s.io/v1beta1 +kind: CustomResourceDefinition +metadata: + name: clusterresourcequotas.quota.openshift.io +spec: + group: quota.openshift.io + names: + kind: ClusterResourceQuota + listKind: ClusterResourceQuotaList + plural: clusterresourcequotas + singular: clusterresourcequota + preserveUnknownFields: false + scope: Cluster + subresources: + status: {} + validation: + openAPIV3Schema: + description: ClusterResourceQuota mirrors ResourceQuota at a cluster scope. This + object is easily convertible to synthetic ResourceQuota object to allow quota + evaluation re-use. + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation + of an object. Servers should convert recognized schemas to the latest + internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource this + object represents. Servers may infer this from the endpoint the client + submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + metadata: + type: object + spec: + description: Spec defines the desired quota + properties: + quota: + description: Quota defines the desired quota + properties: + hard: + additionalProperties: + type: "" + x-kubernetes-int-or-string: true + description: 'hard is the set of desired hard limits for each named + resource. More info: https://kubernetes.io/docs/concepts/policy/resource-quotas/' + type: object + scopeSelector: + description: scopeSelector is also a collection of filters like + scopes that must match each object tracked by a quota but expressed + using ScopeSelectorOperator in combination with possible values. + For a resource to match, both scopes AND scopeSelector (if specified + in spec), must be matched. + properties: + matchExpressions: + description: A list of scope selector requirements by scope + of the resources. + items: + description: A scoped-resource selector requirement is a selector + that contains values, a scope name, and an operator that + relates the scope name and values. + properties: + operator: + description: Represents a scope's relationship to a set + of values. Valid operators are In, NotIn, Exists, DoesNotExist. + type: string + scopeName: + description: The name of the scope that the selector applies + to. + type: string + values: + description: An array of string values. If the operator + is In or NotIn, the values array must be non-empty. + If the operator is Exists or DoesNotExist, the values + array must be empty. This array is replaced during a + strategic merge patch. + items: + type: string + type: array + required: + - operator + - scopeName + type: object + type: array + type: object + scopes: + description: A collection of filters that must match each object + tracked by a quota. If not specified, the quota matches all objects. + items: + description: A ResourceQuotaScope defines a filter that must match + each object tracked by a quota + type: string + type: array + type: object + selector: + description: Selector is the selector used to match projects. 
It should + only select active projects on the scale of dozens (though it can + select many more less active projects). These projects will contend + on object creation through this resource. + properties: + annotations: + additionalProperties: + type: string + description: AnnotationSelector is used to select projects by annotation. + nullable: true + type: object + labels: + description: LabelSelector is used to select projects by label. + nullable: true + properties: + matchExpressions: + description: matchExpressions is a list of label selector requirements. + The requirements are ANDed. + items: + description: A label selector requirement is a selector that + contains values, a key, and an operator that relates the + key and values. + properties: + key: + description: key is the label key that the selector applies + to. + type: string + operator: + description: operator represents a key's relationship + to a set of values. Valid operators are In, NotIn, Exists + and DoesNotExist. + type: string + values: + description: values is an array of string values. If the + operator is In or NotIn, the values array must be non-empty. + If the operator is Exists or DoesNotExist, the values + array must be empty. This array is replaced during a + strategic merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + description: matchLabels is a map of {key,value} pairs. A single + {key,value} in the matchLabels map is equivalent to an element + of matchExpressions, whose key field is "key", the operator + is "In", and the values array contains only "value". The requirements + are ANDed. + type: object + type: object + type: object + required: + - quota + - selector + type: object + status: + description: Status defines the actual enforced quota and its current usage + properties: + namespaces: + description: Namespaces slices the usage by project. This division + allows for quick resolution of deletion reconciliation inside of a + single project without requiring a recalculation across all projects. This + can be used to pull the deltas for a given project. + items: + description: ResourceQuotaStatusByNamespace gives status for a particular + project + properties: + namespace: + description: Namespace the project this status applies to + type: string + status: + description: Status indicates how many resources have been consumed + by this project + properties: + hard: + additionalProperties: + type: string + description: 'Hard is the set of enforced hard limits for + each named resource. More info: https://kubernetes.io/docs/concepts/policy/resource-quotas/' + type: object + used: + additionalProperties: + type: string + description: Used is the current observed total usage of the + resource in the namespace. + type: object + type: object + required: + - namespace + - status + type: object + nullable: true + type: array + total: + description: Total defines the actual enforced quota and its current + usage across all projects + properties: + hard: + additionalProperties: + type: string + description: 'Hard is the set of enforced hard limits for each named + resource. More info: https://kubernetes.io/docs/concepts/policy/resource-quotas/' + type: object + used: + additionalProperties: + type: string + description: Used is the current observed total usage of the resource + in the namespace. 
+ type: object + type: object + required: + - total + type: object + required: + - metadata + - spec + type: object + versions: + - name: v1 + served: true + storage: true diff --git a/vendor/github.com/openshift/api/quota/v1/0000_03_quota-openshift_01_clusterresourcequota.crd.yaml-merge-patch b/vendor/github.com/openshift/api/quota/v1/0000_03_quota-openshift_01_clusterresourcequota.crd.yaml-merge-patch new file mode 100644 index 0000000000000000000000000000000000000000..1897fdbee9ed0b38302d76f9a5b16d0cd3d528bc --- /dev/null +++ b/vendor/github.com/openshift/api/quota/v1/0000_03_quota-openshift_01_clusterresourcequota.crd.yaml-merge-patch @@ -0,0 +1,13 @@ +spec: + validation: + openAPIV3Schema: + properties: + spec: + properties: + quota: + properties: + hard: + additionalProperties: + type: "" + x-kubernetes-int-or-string: true + diff --git a/vendor/github.com/openshift/api/quota/v1/doc.go b/vendor/github.com/openshift/api/quota/v1/doc.go new file mode 100644 index 0000000000000000000000000000000000000000..ae5c9c2c762ae994d9fac55c0414103c4b21292d --- /dev/null +++ b/vendor/github.com/openshift/api/quota/v1/doc.go @@ -0,0 +1,8 @@ +// +k8s:deepcopy-gen=package,register +// +k8s:conversion-gen=github.com/openshift/origin/pkg/quota/apis/quota +// +k8s:defaulter-gen=TypeMeta +// +k8s:openapi-gen=true + +// +groupName=quota.openshift.io +// Package v1 is the v1 version of the API. +package v1 diff --git a/vendor/github.com/openshift/api/quota/v1/generated.pb.go b/vendor/github.com/openshift/api/quota/v1/generated.pb.go new file mode 100644 index 0000000000000000000000000000000000000000..8da48a79ff5ddff0cf3b9efed025ebfe73c6b1e2 --- /dev/null +++ b/vendor/github.com/openshift/api/quota/v1/generated.pb.go @@ -0,0 +1,2202 @@ +// Code generated by protoc-gen-gogo. DO NOT EDIT. +// source: github.com/openshift/api/quota/v1/generated.proto + +package v1 + +import ( + fmt "fmt" + + io "io" + + proto "github.com/gogo/protobuf/proto" + github_com_gogo_protobuf_sortkeys "github.com/gogo/protobuf/sortkeys" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + + math "math" + math_bits "math/bits" + reflect "reflect" + strings "strings" +) + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. 
+const _ = proto.GoGoProtoPackageIsVersion2 // please upgrade the proto package + +func (m *AppliedClusterResourceQuota) Reset() { *m = AppliedClusterResourceQuota{} } +func (*AppliedClusterResourceQuota) ProtoMessage() {} +func (*AppliedClusterResourceQuota) Descriptor() ([]byte, []int) { + return fileDescriptor_f605e5b8440aecb8, []int{0} +} +func (m *AppliedClusterResourceQuota) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *AppliedClusterResourceQuota) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *AppliedClusterResourceQuota) XXX_Merge(src proto.Message) { + xxx_messageInfo_AppliedClusterResourceQuota.Merge(m, src) +} +func (m *AppliedClusterResourceQuota) XXX_Size() int { + return m.Size() +} +func (m *AppliedClusterResourceQuota) XXX_DiscardUnknown() { + xxx_messageInfo_AppliedClusterResourceQuota.DiscardUnknown(m) +} + +var xxx_messageInfo_AppliedClusterResourceQuota proto.InternalMessageInfo + +func (m *AppliedClusterResourceQuotaList) Reset() { *m = AppliedClusterResourceQuotaList{} } +func (*AppliedClusterResourceQuotaList) ProtoMessage() {} +func (*AppliedClusterResourceQuotaList) Descriptor() ([]byte, []int) { + return fileDescriptor_f605e5b8440aecb8, []int{1} +} +func (m *AppliedClusterResourceQuotaList) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *AppliedClusterResourceQuotaList) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *AppliedClusterResourceQuotaList) XXX_Merge(src proto.Message) { + xxx_messageInfo_AppliedClusterResourceQuotaList.Merge(m, src) +} +func (m *AppliedClusterResourceQuotaList) XXX_Size() int { + return m.Size() +} +func (m *AppliedClusterResourceQuotaList) XXX_DiscardUnknown() { + xxx_messageInfo_AppliedClusterResourceQuotaList.DiscardUnknown(m) +} + +var xxx_messageInfo_AppliedClusterResourceQuotaList proto.InternalMessageInfo + +func (m *ClusterResourceQuota) Reset() { *m = ClusterResourceQuota{} } +func (*ClusterResourceQuota) ProtoMessage() {} +func (*ClusterResourceQuota) Descriptor() ([]byte, []int) { + return fileDescriptor_f605e5b8440aecb8, []int{2} +} +func (m *ClusterResourceQuota) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ClusterResourceQuota) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *ClusterResourceQuota) XXX_Merge(src proto.Message) { + xxx_messageInfo_ClusterResourceQuota.Merge(m, src) +} +func (m *ClusterResourceQuota) XXX_Size() int { + return m.Size() +} +func (m *ClusterResourceQuota) XXX_DiscardUnknown() { + xxx_messageInfo_ClusterResourceQuota.DiscardUnknown(m) +} + +var xxx_messageInfo_ClusterResourceQuota proto.InternalMessageInfo + +func (m *ClusterResourceQuotaList) Reset() { *m = ClusterResourceQuotaList{} } +func (*ClusterResourceQuotaList) ProtoMessage() {} +func (*ClusterResourceQuotaList) Descriptor() ([]byte, []int) { + return fileDescriptor_f605e5b8440aecb8, []int{3} +} +func (m *ClusterResourceQuotaList) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ClusterResourceQuotaList) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + 
return nil, err + } + return b[:n], nil +} +func (m *ClusterResourceQuotaList) XXX_Merge(src proto.Message) { + xxx_messageInfo_ClusterResourceQuotaList.Merge(m, src) +} +func (m *ClusterResourceQuotaList) XXX_Size() int { + return m.Size() +} +func (m *ClusterResourceQuotaList) XXX_DiscardUnknown() { + xxx_messageInfo_ClusterResourceQuotaList.DiscardUnknown(m) +} + +var xxx_messageInfo_ClusterResourceQuotaList proto.InternalMessageInfo + +func (m *ClusterResourceQuotaSelector) Reset() { *m = ClusterResourceQuotaSelector{} } +func (*ClusterResourceQuotaSelector) ProtoMessage() {} +func (*ClusterResourceQuotaSelector) Descriptor() ([]byte, []int) { + return fileDescriptor_f605e5b8440aecb8, []int{4} +} +func (m *ClusterResourceQuotaSelector) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ClusterResourceQuotaSelector) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *ClusterResourceQuotaSelector) XXX_Merge(src proto.Message) { + xxx_messageInfo_ClusterResourceQuotaSelector.Merge(m, src) +} +func (m *ClusterResourceQuotaSelector) XXX_Size() int { + return m.Size() +} +func (m *ClusterResourceQuotaSelector) XXX_DiscardUnknown() { + xxx_messageInfo_ClusterResourceQuotaSelector.DiscardUnknown(m) +} + +var xxx_messageInfo_ClusterResourceQuotaSelector proto.InternalMessageInfo + +func (m *ClusterResourceQuotaSpec) Reset() { *m = ClusterResourceQuotaSpec{} } +func (*ClusterResourceQuotaSpec) ProtoMessage() {} +func (*ClusterResourceQuotaSpec) Descriptor() ([]byte, []int) { + return fileDescriptor_f605e5b8440aecb8, []int{5} +} +func (m *ClusterResourceQuotaSpec) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ClusterResourceQuotaSpec) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *ClusterResourceQuotaSpec) XXX_Merge(src proto.Message) { + xxx_messageInfo_ClusterResourceQuotaSpec.Merge(m, src) +} +func (m *ClusterResourceQuotaSpec) XXX_Size() int { + return m.Size() +} +func (m *ClusterResourceQuotaSpec) XXX_DiscardUnknown() { + xxx_messageInfo_ClusterResourceQuotaSpec.DiscardUnknown(m) +} + +var xxx_messageInfo_ClusterResourceQuotaSpec proto.InternalMessageInfo + +func (m *ClusterResourceQuotaStatus) Reset() { *m = ClusterResourceQuotaStatus{} } +func (*ClusterResourceQuotaStatus) ProtoMessage() {} +func (*ClusterResourceQuotaStatus) Descriptor() ([]byte, []int) { + return fileDescriptor_f605e5b8440aecb8, []int{6} +} +func (m *ClusterResourceQuotaStatus) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ClusterResourceQuotaStatus) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *ClusterResourceQuotaStatus) XXX_Merge(src proto.Message) { + xxx_messageInfo_ClusterResourceQuotaStatus.Merge(m, src) +} +func (m *ClusterResourceQuotaStatus) XXX_Size() int { + return m.Size() +} +func (m *ClusterResourceQuotaStatus) XXX_DiscardUnknown() { + xxx_messageInfo_ClusterResourceQuotaStatus.DiscardUnknown(m) +} + +var xxx_messageInfo_ClusterResourceQuotaStatus proto.InternalMessageInfo + +func (m *ResourceQuotaStatusByNamespace) Reset() { *m = ResourceQuotaStatusByNamespace{} } +func (*ResourceQuotaStatusByNamespace) ProtoMessage() {} +func 
(*ResourceQuotaStatusByNamespace) Descriptor() ([]byte, []int) { + return fileDescriptor_f605e5b8440aecb8, []int{7} +} +func (m *ResourceQuotaStatusByNamespace) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ResourceQuotaStatusByNamespace) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *ResourceQuotaStatusByNamespace) XXX_Merge(src proto.Message) { + xxx_messageInfo_ResourceQuotaStatusByNamespace.Merge(m, src) +} +func (m *ResourceQuotaStatusByNamespace) XXX_Size() int { + return m.Size() +} +func (m *ResourceQuotaStatusByNamespace) XXX_DiscardUnknown() { + xxx_messageInfo_ResourceQuotaStatusByNamespace.DiscardUnknown(m) +} + +var xxx_messageInfo_ResourceQuotaStatusByNamespace proto.InternalMessageInfo + +func init() { + proto.RegisterType((*AppliedClusterResourceQuota)(nil), "github.com.openshift.api.quota.v1.AppliedClusterResourceQuota") + proto.RegisterType((*AppliedClusterResourceQuotaList)(nil), "github.com.openshift.api.quota.v1.AppliedClusterResourceQuotaList") + proto.RegisterType((*ClusterResourceQuota)(nil), "github.com.openshift.api.quota.v1.ClusterResourceQuota") + proto.RegisterType((*ClusterResourceQuotaList)(nil), "github.com.openshift.api.quota.v1.ClusterResourceQuotaList") + proto.RegisterType((*ClusterResourceQuotaSelector)(nil), "github.com.openshift.api.quota.v1.ClusterResourceQuotaSelector") + proto.RegisterMapType((map[string]string)(nil), "github.com.openshift.api.quota.v1.ClusterResourceQuotaSelector.AnnotationsEntry") + proto.RegisterType((*ClusterResourceQuotaSpec)(nil), "github.com.openshift.api.quota.v1.ClusterResourceQuotaSpec") + proto.RegisterType((*ClusterResourceQuotaStatus)(nil), "github.com.openshift.api.quota.v1.ClusterResourceQuotaStatus") + proto.RegisterType((*ResourceQuotaStatusByNamespace)(nil), "github.com.openshift.api.quota.v1.ResourceQuotaStatusByNamespace") +} + +func init() { + proto.RegisterFile("github.com/openshift/api/quota/v1/generated.proto", fileDescriptor_f605e5b8440aecb8) +} + +var fileDescriptor_f605e5b8440aecb8 = []byte{ + // 715 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xec, 0x55, 0x41, 0x6f, 0xd3, 0x48, + 0x18, 0x8d, 0xdd, 0xa6, 0x6a, 0xa6, 0xdb, 0x55, 0x3b, 0xea, 0x21, 0xca, 0xae, 0x9c, 0xae, 0xa5, + 0x15, 0xbd, 0x30, 0x26, 0x05, 0x41, 0x05, 0xa2, 0xa8, 0x46, 0x1c, 0x40, 0x85, 0x82, 0xe1, 0x84, + 0x0a, 0x62, 0xe2, 0x4e, 0x13, 0x13, 0xdb, 0x63, 0x3c, 0xe3, 0x48, 0xb9, 0xf1, 0x0b, 0x10, 0xbf, + 0x81, 0x1f, 0xc2, 0x0d, 0xa9, 0x37, 0x7a, 0x01, 0xf5, 0x54, 0x11, 0xc3, 0x0f, 0x41, 0x33, 0x9e, + 0xd8, 0x69, 0x9b, 0xb4, 0xa1, 0x3d, 0x70, 0xe1, 0xe6, 0xf9, 0x32, 0xef, 0xbd, 0x79, 0xcf, 0x6f, + 0x1c, 0xd0, 0x68, 0x79, 0xbc, 0x9d, 0x34, 0x91, 0x4b, 0x03, 0x8b, 0x46, 0x24, 0x64, 0x6d, 0x6f, + 0x97, 0x5b, 0x38, 0xf2, 0xac, 0x37, 0x09, 0xe5, 0xd8, 0xea, 0x36, 0xac, 0x16, 0x09, 0x49, 0x8c, + 0x39, 0xd9, 0x41, 0x51, 0x4c, 0x39, 0x85, 0xff, 0x15, 0x10, 0x94, 0x43, 0x10, 0x8e, 0x3c, 0x24, + 0x21, 0xa8, 0xdb, 0xa8, 0x5d, 0x1e, 0x62, 0x6d, 0xd1, 0x16, 0xb5, 0x24, 0xb2, 0x99, 0xec, 0xca, + 0x95, 0x5c, 0xc8, 0xa7, 0x8c, 0xb1, 0x66, 0x76, 0xd6, 0x18, 0xf2, 0xa8, 0x94, 0x75, 0x69, 0x4c, + 0x46, 0xa8, 0xd6, 0xae, 0x15, 0x7b, 0x02, 0xec, 0xb6, 0xbd, 0x90, 0xc4, 0x3d, 0x2b, 0xea, 0xb4, + 0xc4, 0x80, 0x59, 0x01, 0x19, 0x79, 0xd6, 0xda, 0xf5, 0x71, 0xa8, 0x38, 0x09, 0xb9, 0x17, 0x10, + 0x8b, 0xb9, 0x6d, 0x12, 0xe0, 0xe3, 0x38, 0xf3, 0x93, 
0x0e, 0xfe, 0xd9, 0x88, 0x22, 0xdf, 0x23, + 0x3b, 0x77, 0xfd, 0x84, 0x71, 0x12, 0x3b, 0x84, 0xd1, 0x24, 0x76, 0xc9, 0x13, 0xe1, 0x11, 0xbe, + 0x02, 0xb3, 0x42, 0x72, 0x07, 0x73, 0x5c, 0xd5, 0x96, 0xb5, 0x95, 0xb9, 0xd5, 0x2b, 0x28, 0x93, + 0x42, 0xc3, 0x52, 0x28, 0xea, 0xb4, 0xc4, 0x80, 0x21, 0xb1, 0x1b, 0x75, 0x1b, 0x68, 0xab, 0xf9, + 0x9a, 0xb8, 0xfc, 0x21, 0xe1, 0xd8, 0x86, 0x7b, 0x87, 0xf5, 0x52, 0x7a, 0x58, 0x07, 0xc5, 0xcc, + 0xc9, 0x59, 0xe1, 0x0b, 0x30, 0xcd, 0x22, 0xe2, 0x56, 0x75, 0xc9, 0x7e, 0x0b, 0x9d, 0x19, 0x3a, + 0x1a, 0x75, 0xd0, 0xa7, 0x11, 0x71, 0xed, 0xbf, 0x94, 0xd0, 0xb4, 0x58, 0x39, 0x92, 0x16, 0x12, + 0x30, 0xc3, 0x38, 0xe6, 0x09, 0xab, 0x4e, 0x49, 0x81, 0xdb, 0xe7, 0x15, 0x90, 0x24, 0xf6, 0xdf, + 0x4a, 0x62, 0x26, 0x5b, 0x3b, 0x8a, 0xdc, 0xfc, 0xa1, 0x81, 0xfa, 0x29, 0x39, 0x6e, 0x7a, 0x8c, + 0xc3, 0xed, 0x13, 0x59, 0xa2, 0xc9, 0xb2, 0x14, 0x68, 0x99, 0xe4, 0x82, 0x52, 0x9f, 0x1d, 0x4c, + 0x86, 0x72, 0x74, 0x41, 0xd9, 0xe3, 0x24, 0x60, 0x55, 0x7d, 0x79, 0x6a, 0x65, 0x6e, 0x75, 0x7d, + 0x02, 0x9f, 0xa7, 0x1c, 0xd8, 0x9e, 0x57, 0x52, 0xe5, 0xfb, 0x82, 0xd4, 0xc9, 0xb8, 0xcd, 0x8f, + 0x3a, 0x58, 0xfa, 0xd3, 0x93, 0x0b, 0xf4, 0xe4, 0xab, 0x06, 0xaa, 0xbf, 0xa9, 0x20, 0xdb, 0x47, + 0x0b, 0x72, 0xe3, 0x9c, 0x06, 0xc7, 0x34, 0xe3, 0xb3, 0x0e, 0xfe, 0x1d, 0x99, 0x07, 0xf1, 0x89, + 0xcb, 0x69, 0x0c, 0x5f, 0x82, 0x19, 0x1f, 0x37, 0x89, 0xcf, 0x94, 0xb5, 0xab, 0x13, 0x5a, 0x13, + 0x98, 0x01, 0x89, 0xbd, 0x98, 0x1e, 0xd6, 0xe7, 0x8f, 0x8c, 0x1c, 0xc5, 0x0a, 0xdf, 0x69, 0x60, + 0x0e, 0x87, 0x21, 0xe5, 0x98, 0x7b, 0x34, 0x1c, 0xb8, 0x7c, 0x7c, 0xde, 0xd7, 0xa8, 0xe8, 0xd1, + 0x46, 0x41, 0x79, 0x2f, 0xe4, 0x71, 0xcf, 0xae, 0x29, 0xfb, 0xb0, 0xf8, 0x25, 0x3f, 0xcb, 0xf0, + 0x01, 0x6a, 0xeb, 0x60, 0xe1, 0x38, 0x18, 0x2e, 0x80, 0xa9, 0x0e, 0xe9, 0xc9, 0x04, 0x2a, 0x8e, + 0x78, 0x84, 0x4b, 0xa0, 0xdc, 0xc5, 0x7e, 0x42, 0x64, 0xaf, 0x2b, 0x4e, 0xb6, 0xb8, 0xa9, 0xaf, + 0x69, 0xe6, 0x97, 0x31, 0x55, 0x11, 0xa5, 0x85, 0x01, 0x98, 0x65, 0x4a, 0x55, 0xe5, 0x79, 0xe7, + 0x82, 0x4e, 0x8b, 0xee, 0xe4, 0x76, 0x72, 0x09, 0xf8, 0x00, 0x94, 0x25, 0x89, 0xba, 0x7d, 0xff, + 0x0f, 0xbd, 0x3b, 0x24, 0xfe, 0xc8, 0x04, 0xf9, 0xc9, 0x7b, 0x96, 0x37, 0x45, 0x8e, 0x9c, 0x8c, + 0xc2, 0xec, 0x6b, 0xa0, 0x36, 0xfe, 0xe6, 0xc0, 0x4d, 0x50, 0xe6, 0x94, 0x63, 0x5f, 0xd9, 0xba, + 0x74, 0xb6, 0x54, 0x76, 0xe3, 0x72, 0xb1, 0x67, 0x02, 0xed, 0x64, 0x24, 0x30, 0x01, 0x20, 0xc4, + 0x01, 0x61, 0x11, 0x76, 0xc9, 0xa0, 0x13, 0x1b, 0x13, 0x24, 0x35, 0x4a, 0xa1, 0xf7, 0x68, 0xc0, + 0x54, 0x7c, 0xaa, 0xf2, 0x11, 0x73, 0x86, 0x84, 0xcc, 0x0f, 0x1a, 0x30, 0x4e, 0xa7, 0x80, 0x16, + 0xa8, 0xe4, 0x80, 0xac, 0x10, 0xf6, 0xa2, 0x62, 0xad, 0xe4, 0xbb, 0x9c, 0x62, 0x0f, 0xdc, 0xca, + 0xbf, 0x50, 0xfa, 0xaf, 0x25, 0x33, 0xe6, 0x5b, 0x64, 0xaf, 0xec, 0xf5, 0x8d, 0xd2, 0x7e, 0xdf, + 0x28, 0x1d, 0xf4, 0x8d, 0xd2, 0xdb, 0xd4, 0xd0, 0xf6, 0x52, 0x43, 0xdb, 0x4f, 0x0d, 0xed, 0x20, + 0x35, 0xb4, 0x6f, 0xa9, 0xa1, 0xbd, 0xff, 0x6e, 0x94, 0x9e, 0xeb, 0xdd, 0xc6, 0xcf, 0x00, 0x00, + 0x00, 0xff, 0xff, 0xa7, 0x76, 0xc9, 0x6f, 0x3d, 0x09, 0x00, 0x00, +} + +func (m *AppliedClusterResourceQuota) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *AppliedClusterResourceQuota) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *AppliedClusterResourceQuota) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + 
var l int + _ = l + { + size, err := m.Status.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a + { + size, err := m.Spec.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + { + size, err := m.ObjectMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *AppliedClusterResourceQuotaList) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *AppliedClusterResourceQuotaList) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *AppliedClusterResourceQuotaList) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.Items) > 0 { + for iNdEx := len(m.Items) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Items[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + } + { + size, err := m.ListMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *ClusterResourceQuota) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ClusterResourceQuota) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ClusterResourceQuota) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + { + size, err := m.Status.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a + { + size, err := m.Spec.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + { + size, err := m.ObjectMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *ClusterResourceQuotaList) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ClusterResourceQuotaList) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ClusterResourceQuotaList) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.Items) > 0 { + for iNdEx := len(m.Items) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Items[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + } + { + size, err := 
m.ListMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *ClusterResourceQuotaSelector) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ClusterResourceQuotaSelector) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ClusterResourceQuotaSelector) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.AnnotationSelector) > 0 { + keysForAnnotationSelector := make([]string, 0, len(m.AnnotationSelector)) + for k := range m.AnnotationSelector { + keysForAnnotationSelector = append(keysForAnnotationSelector, string(k)) + } + github_com_gogo_protobuf_sortkeys.Strings(keysForAnnotationSelector) + for iNdEx := len(keysForAnnotationSelector) - 1; iNdEx >= 0; iNdEx-- { + v := m.AnnotationSelector[string(keysForAnnotationSelector[iNdEx])] + baseI := i + i -= len(v) + copy(dAtA[i:], v) + i = encodeVarintGenerated(dAtA, i, uint64(len(v))) + i-- + dAtA[i] = 0x12 + i -= len(keysForAnnotationSelector[iNdEx]) + copy(dAtA[i:], keysForAnnotationSelector[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(keysForAnnotationSelector[iNdEx]))) + i-- + dAtA[i] = 0xa + i = encodeVarintGenerated(dAtA, i, uint64(baseI-i)) + i-- + dAtA[i] = 0x12 + } + } + if m.LabelSelector != nil { + { + size, err := m.LabelSelector.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *ClusterResourceQuotaSpec) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ClusterResourceQuotaSpec) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ClusterResourceQuotaSpec) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + { + size, err := m.Quota.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + { + size, err := m.Selector.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *ClusterResourceQuotaStatus) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ClusterResourceQuotaStatus) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ClusterResourceQuotaStatus) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.Namespaces) > 0 { + for iNdEx := len(m.Namespaces) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Namespaces[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 
0x12 + } + } + { + size, err := m.Total.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *ResourceQuotaStatusByNamespace) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ResourceQuotaStatusByNamespace) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ResourceQuotaStatusByNamespace) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + { + size, err := m.Status.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + i -= len(m.Namespace) + copy(dAtA[i:], m.Namespace) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Namespace))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func encodeVarintGenerated(dAtA []byte, offset int, v uint64) int { + offset -= sovGenerated(v) + base := offset + for v >= 1<<7 { + dAtA[offset] = uint8(v&0x7f | 0x80) + v >>= 7 + offset++ + } + dAtA[offset] = uint8(v) + return base +} +func (m *AppliedClusterResourceQuota) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.ObjectMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = m.Spec.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = m.Status.Size() + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *AppliedClusterResourceQuotaList) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.ListMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + if len(m.Items) > 0 { + for _, e := range m.Items { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m *ClusterResourceQuota) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.ObjectMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = m.Spec.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = m.Status.Size() + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *ClusterResourceQuotaList) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.ListMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + if len(m.Items) > 0 { + for _, e := range m.Items { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m *ClusterResourceQuotaSelector) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.LabelSelector != nil { + l = m.LabelSelector.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if len(m.AnnotationSelector) > 0 { + for k, v := range m.AnnotationSelector { + _ = k + _ = v + mapEntrySize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + len(v) + sovGenerated(uint64(len(v))) + n += mapEntrySize + 1 + sovGenerated(uint64(mapEntrySize)) + } + } + return n +} + +func (m *ClusterResourceQuotaSpec) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.Selector.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = m.Quota.Size() + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *ClusterResourceQuotaStatus) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.Total.Size() + n += 1 + l + sovGenerated(uint64(l)) + if len(m.Namespaces) > 0 { + for _, e := range m.Namespaces { + l = e.Size() + 
n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m *ResourceQuotaStatusByNamespace) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Namespace) + n += 1 + l + sovGenerated(uint64(l)) + l = m.Status.Size() + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func sovGenerated(x uint64) (n int) { + return (math_bits.Len64(x|1) + 6) / 7 +} +func sozGenerated(x uint64) (n int) { + return sovGenerated(uint64((x << 1) ^ uint64((int64(x) >> 63)))) +} +func (this *AppliedClusterResourceQuota) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&AppliedClusterResourceQuota{`, + `ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v1.ObjectMeta", 1), `&`, ``, 1) + `,`, + `Spec:` + strings.Replace(strings.Replace(this.Spec.String(), "ClusterResourceQuotaSpec", "ClusterResourceQuotaSpec", 1), `&`, ``, 1) + `,`, + `Status:` + strings.Replace(strings.Replace(this.Status.String(), "ClusterResourceQuotaStatus", "ClusterResourceQuotaStatus", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *AppliedClusterResourceQuotaList) String() string { + if this == nil { + return "nil" + } + repeatedStringForItems := "[]AppliedClusterResourceQuota{" + for _, f := range this.Items { + repeatedStringForItems += strings.Replace(strings.Replace(f.String(), "AppliedClusterResourceQuota", "AppliedClusterResourceQuota", 1), `&`, ``, 1) + "," + } + repeatedStringForItems += "}" + s := strings.Join([]string{`&AppliedClusterResourceQuotaList{`, + `ListMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ListMeta), "ListMeta", "v1.ListMeta", 1), `&`, ``, 1) + `,`, + `Items:` + repeatedStringForItems + `,`, + `}`, + }, "") + return s +} +func (this *ClusterResourceQuota) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&ClusterResourceQuota{`, + `ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v1.ObjectMeta", 1), `&`, ``, 1) + `,`, + `Spec:` + strings.Replace(strings.Replace(this.Spec.String(), "ClusterResourceQuotaSpec", "ClusterResourceQuotaSpec", 1), `&`, ``, 1) + `,`, + `Status:` + strings.Replace(strings.Replace(this.Status.String(), "ClusterResourceQuotaStatus", "ClusterResourceQuotaStatus", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *ClusterResourceQuotaList) String() string { + if this == nil { + return "nil" + } + repeatedStringForItems := "[]ClusterResourceQuota{" + for _, f := range this.Items { + repeatedStringForItems += strings.Replace(strings.Replace(f.String(), "ClusterResourceQuota", "ClusterResourceQuota", 1), `&`, ``, 1) + "," + } + repeatedStringForItems += "}" + s := strings.Join([]string{`&ClusterResourceQuotaList{`, + `ListMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ListMeta), "ListMeta", "v1.ListMeta", 1), `&`, ``, 1) + `,`, + `Items:` + repeatedStringForItems + `,`, + `}`, + }, "") + return s +} +func (this *ClusterResourceQuotaSelector) String() string { + if this == nil { + return "nil" + } + keysForAnnotationSelector := make([]string, 0, len(this.AnnotationSelector)) + for k := range this.AnnotationSelector { + keysForAnnotationSelector = append(keysForAnnotationSelector, k) + } + github_com_gogo_protobuf_sortkeys.Strings(keysForAnnotationSelector) + mapStringForAnnotationSelector := "map[string]string{" + for _, k := range keysForAnnotationSelector { + mapStringForAnnotationSelector += fmt.Sprintf("%v: %v,", k, 
this.AnnotationSelector[k]) + } + mapStringForAnnotationSelector += "}" + s := strings.Join([]string{`&ClusterResourceQuotaSelector{`, + `LabelSelector:` + strings.Replace(fmt.Sprintf("%v", this.LabelSelector), "LabelSelector", "v1.LabelSelector", 1) + `,`, + `AnnotationSelector:` + mapStringForAnnotationSelector + `,`, + `}`, + }, "") + return s +} +func (this *ClusterResourceQuotaSpec) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&ClusterResourceQuotaSpec{`, + `Selector:` + strings.Replace(strings.Replace(this.Selector.String(), "ClusterResourceQuotaSelector", "ClusterResourceQuotaSelector", 1), `&`, ``, 1) + `,`, + `Quota:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Quota), "ResourceQuotaSpec", "v11.ResourceQuotaSpec", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *ClusterResourceQuotaStatus) String() string { + if this == nil { + return "nil" + } + repeatedStringForNamespaces := "[]ResourceQuotaStatusByNamespace{" + for _, f := range this.Namespaces { + repeatedStringForNamespaces += strings.Replace(strings.Replace(f.String(), "ResourceQuotaStatusByNamespace", "ResourceQuotaStatusByNamespace", 1), `&`, ``, 1) + "," + } + repeatedStringForNamespaces += "}" + s := strings.Join([]string{`&ClusterResourceQuotaStatus{`, + `Total:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Total), "ResourceQuotaStatus", "v11.ResourceQuotaStatus", 1), `&`, ``, 1) + `,`, + `Namespaces:` + repeatedStringForNamespaces + `,`, + `}`, + }, "") + return s +} +func (this *ResourceQuotaStatusByNamespace) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&ResourceQuotaStatusByNamespace{`, + `Namespace:` + fmt.Sprintf("%v", this.Namespace) + `,`, + `Status:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Status), "ResourceQuotaStatus", "v11.ResourceQuotaStatus", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func valueToStringGenerated(v interface{}) string { + rv := reflect.ValueOf(v) + if rv.IsNil() { + return "nil" + } + pv := reflect.Indirect(rv).Interface() + return fmt.Sprintf("*%v", pv) +} +func (m *AppliedClusterResourceQuota) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: AppliedClusterResourceQuota: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: AppliedClusterResourceQuota: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ObjectMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + 
case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Spec", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Spec.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Status", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Status.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *AppliedClusterResourceQuotaList) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: AppliedClusterResourceQuotaList: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: AppliedClusterResourceQuotaList: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ListMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ListMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return 
ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Items = append(m.Items, AppliedClusterResourceQuota{}) + if err := m.Items[len(m.Items)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ClusterResourceQuota) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ClusterResourceQuota: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ClusterResourceQuota: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ObjectMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Spec", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Spec.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Status", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Status.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if 
err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ClusterResourceQuotaList) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ClusterResourceQuotaList: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ClusterResourceQuotaList: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ListMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ListMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Items = append(m.Items, ClusterResourceQuota{}) + if err := m.Items[len(m.Items)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ClusterResourceQuotaSelector) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ClusterResourceQuotaSelector: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ClusterResourceQuotaSelector: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch 
fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field LabelSelector", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.LabelSelector == nil { + m.LabelSelector = &v1.LabelSelector{} + } + if err := m.LabelSelector.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field AnnotationSelector", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.AnnotationSelector == nil { + m.AnnotationSelector = make(map[string]string) + } + var mapkey string + var mapvalue string + for iNdEx < postIndex { + entryPreIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + if fieldNum == 1 { + var stringLenmapkey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapkey |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapkey := int(stringLenmapkey) + if intStringLenmapkey < 0 { + return ErrInvalidLengthGenerated + } + postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey < 0 { + return ErrInvalidLengthGenerated + } + if postStringIndexmapkey > l { + return io.ErrUnexpectedEOF + } + mapkey = string(dAtA[iNdEx:postStringIndexmapkey]) + iNdEx = postStringIndexmapkey + } else if fieldNum == 2 { + var stringLenmapvalue uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapvalue |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapvalue := int(stringLenmapvalue) + if intStringLenmapvalue < 0 { + return ErrInvalidLengthGenerated + } + postStringIndexmapvalue := iNdEx + intStringLenmapvalue + if postStringIndexmapvalue < 0 { + return ErrInvalidLengthGenerated + } + if postStringIndexmapvalue > l { + return io.ErrUnexpectedEOF + } + mapvalue = string(dAtA[iNdEx:postStringIndexmapvalue]) + iNdEx = postStringIndexmapvalue + } else { + iNdEx = entryPreIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > postIndex { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + m.AnnotationSelector[mapkey] = mapvalue + 
iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ClusterResourceQuotaSpec) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ClusterResourceQuotaSpec: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ClusterResourceQuotaSpec: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Selector", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Selector.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Quota", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Quota.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ClusterResourceQuotaStatus) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ClusterResourceQuotaStatus: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ClusterResourceQuotaStatus: illegal tag %d (wire type %d)", 
fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Total", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Total.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Namespaces", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Namespaces = append(m.Namespaces, ResourceQuotaStatusByNamespace{}) + if err := m.Namespaces[len(m.Namespaces)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ResourceQuotaStatusByNamespace) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ResourceQuotaStatusByNamespace: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ResourceQuotaStatusByNamespace: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Namespace", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Namespace = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Status", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + 
return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Status.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func skipGenerated(dAtA []byte) (n int, err error) { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowGenerated + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + wireType := int(wire & 0x7) + switch wireType { + case 0: + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowGenerated + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + iNdEx++ + if dAtA[iNdEx-1] < 0x80 { + break + } + } + return iNdEx, nil + case 1: + iNdEx += 8 + return iNdEx, nil + case 2: + var length int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowGenerated + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + length |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if length < 0 { + return 0, ErrInvalidLengthGenerated + } + iNdEx += length + if iNdEx < 0 { + return 0, ErrInvalidLengthGenerated + } + return iNdEx, nil + case 3: + for { + var innerWire uint64 + var start int = iNdEx + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowGenerated + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + innerWire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + innerWireType := int(innerWire & 0x7) + if innerWireType == 4 { + break + } + next, err := skipGenerated(dAtA[start:]) + if err != nil { + return 0, err + } + iNdEx = start + next + if iNdEx < 0 { + return 0, ErrInvalidLengthGenerated + } + } + return iNdEx, nil + case 4: + return iNdEx, nil + case 5: + iNdEx += 4 + return iNdEx, nil + default: + return 0, fmt.Errorf("proto: illegal wireType %d", wireType) + } + } + panic("unreachable") +} + +var ( + ErrInvalidLengthGenerated = fmt.Errorf("proto: negative length found during unmarshaling") + ErrIntOverflowGenerated = fmt.Errorf("proto: integer overflow") +) diff --git a/vendor/github.com/openshift/api/quota/v1/generated.proto b/vendor/github.com/openshift/api/quota/v1/generated.proto new file mode 100644 index 0000000000000000000000000000000000000000..ba3fe4659d146a60ce1a23538dcbb95d2f25ca4b --- /dev/null +++ b/vendor/github.com/openshift/api/quota/v1/generated.proto @@ -0,0 +1,104 @@ + +// This file was autogenerated by go-to-protobuf. Do not edit it manually! 
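+// Note: go-to-protobuf derives these messages from the Go structs in types.go, so the field numbers below match the protobuf struct tags on the corresponding Go types.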
+ +syntax = 'proto2'; + +package github.com.openshift.api.quota.v1; + +import "k8s.io/api/core/v1/generated.proto"; +import "k8s.io/apimachinery/pkg/apis/meta/v1/generated.proto"; +import "k8s.io/apimachinery/pkg/runtime/schema/generated.proto"; + +// Package-wide variables from generator "generated". +option go_package = "v1"; + +// AppliedClusterResourceQuota mirrors ClusterResourceQuota at a project scope, for projection +// into a project. It allows a project-admin to know which ClusterResourceQuotas are applied to +// his project and their associated usage. +message AppliedClusterResourceQuota { + optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; + + // Spec defines the desired quota + optional ClusterResourceQuotaSpec spec = 2; + + // Status defines the actual enforced quota and its current usage + optional ClusterResourceQuotaStatus status = 3; +} + +// AppliedClusterResourceQuotaList is a collection of AppliedClusterResourceQuotas +message AppliedClusterResourceQuotaList { + optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; + + // Items is a list of AppliedClusterResourceQuota + repeated AppliedClusterResourceQuota items = 2; +} + +// ClusterResourceQuota mirrors ResourceQuota at a cluster scope. This object is easily convertible to +// a synthetic ResourceQuota object to allow quota evaluation re-use. +message ClusterResourceQuota { + optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; + + // Spec defines the desired quota + optional ClusterResourceQuotaSpec spec = 2; + + // Status defines the actual enforced quota and its current usage + optional ClusterResourceQuotaStatus status = 3; +} + +// ClusterResourceQuotaList is a collection of ClusterResourceQuotas +message ClusterResourceQuotaList { + optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; + + // Items is a list of ClusterResourceQuotas + repeated ClusterResourceQuota items = 2; +} + +// ClusterResourceQuotaSelector is used to select projects. At least one of LabelSelector or AnnotationSelector +// must be present. If only one is present, it is the only selection criterion. If both are specified, +// the project must match both restrictions. +message ClusterResourceQuotaSelector { + // LabelSelector is used to select projects by label. + // +optional + // +nullable + optional k8s.io.apimachinery.pkg.apis.meta.v1.LabelSelector labels = 1; + + // AnnotationSelector is used to select projects by annotation. + // +optional + // +nullable + map<string, string> annotations = 2; +} + +// ClusterResourceQuotaSpec defines the desired quota restrictions +message ClusterResourceQuotaSpec { + // Selector is the selector used to match projects. + // It should only select active projects on the scale of dozens (though it can select + // many more less active projects). These projects will contend on object creation through + // this resource. + optional ClusterResourceQuotaSelector selector = 1; + + // Quota defines the desired quota + optional k8s.io.api.core.v1.ResourceQuotaSpec quota = 2; +} + +// ClusterResourceQuotaStatus defines the actual enforced quota and its current usage +message ClusterResourceQuotaStatus { + // Total defines the actual enforced quota and its current usage across all projects + optional k8s.io.api.core.v1.ResourceQuotaStatus total = 1; + + // Namespaces slices the usage by project. This division allows for quick resolution of + // deletion reconciliation inside of a single project without requiring a recalculation + // across all projects.
This can be used to pull the deltas for a given project. + // +optional + // +nullable + repeated ResourceQuotaStatusByNamespace namespaces = 2; +} + +// ResourceQuotaStatusByNamespace gives status for a particular project +message ResourceQuotaStatusByNamespace { + // Namespace the project this status applies to + optional string namespace = 1; + + // Status indicates how many resources have been consumed by this project + optional k8s.io.api.core.v1.ResourceQuotaStatus status = 2; +} + diff --git a/vendor/github.com/openshift/api/quota/v1/legacy.go b/vendor/github.com/openshift/api/quota/v1/legacy.go new file mode 100644 index 0000000000000000000000000000000000000000..402690b5d60c51a26760d4f99f2f690ba5ba0d80 --- /dev/null +++ b/vendor/github.com/openshift/api/quota/v1/legacy.go @@ -0,0 +1,24 @@ +package v1 + +import ( + corev1 "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" +) + +var ( + legacyGroupVersion = schema.GroupVersion{Group: "", Version: "v1"} + legacySchemeBuilder = runtime.NewSchemeBuilder(addLegacyKnownTypes, corev1.AddToScheme) + DeprecatedInstallWithoutGroup = legacySchemeBuilder.AddToScheme +) + +func addLegacyKnownTypes(scheme *runtime.Scheme) error { + types := []runtime.Object{ + &ClusterResourceQuota{}, + &ClusterResourceQuotaList{}, + &AppliedClusterResourceQuota{}, + &AppliedClusterResourceQuotaList{}, + } + scheme.AddKnownTypes(legacyGroupVersion, types...) + return nil +} diff --git a/vendor/github.com/openshift/api/quota/v1/register.go b/vendor/github.com/openshift/api/quota/v1/register.go new file mode 100644 index 0000000000000000000000000000000000000000..47c774ef23ec0212553241ca603b224db9509555 --- /dev/null +++ b/vendor/github.com/openshift/api/quota/v1/register.go @@ -0,0 +1,41 @@ +package v1 + +import ( + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" +) + +var ( + GroupName = "quota.openshift.io" + GroupVersion = schema.GroupVersion{Group: GroupName, Version: "v1"} + schemeBuilder = runtime.NewSchemeBuilder(addKnownTypes, corev1.AddToScheme) + // Install is a function which adds this version to a scheme + Install = schemeBuilder.AddToScheme + + // SchemeGroupVersion generated code relies on this name + // Deprecated + SchemeGroupVersion = GroupVersion + // AddToScheme exists solely to keep the old generators creating valid code + // DEPRECATED + AddToScheme = schemeBuilder.AddToScheme +) + +// Resource generated code relies on this being here, but it logically belongs to the group +// DEPRECATED +func Resource(resource string) schema.GroupResource { + return schema.GroupResource{Group: GroupName, Resource: resource} +} + +// Adds the list of known types to api.Scheme. 
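+// Consumers are not expected to call addKnownTypes directly; a scheme is usually wired up once through Install, e.g.: +// +//	scheme := runtime.NewScheme() +//	if err := Install(scheme); err != nil { +//		panic(err) +//	}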
+func addKnownTypes(scheme *runtime.Scheme) error { + scheme.AddKnownTypes(GroupVersion, + &ClusterResourceQuota{}, + &ClusterResourceQuotaList{}, + &AppliedClusterResourceQuota{}, + &AppliedClusterResourceQuotaList{}, + ) + metav1.AddToGroupVersion(scheme, GroupVersion) + return nil +} diff --git a/vendor/github.com/openshift/api/quota/v1/types.go b/vendor/github.com/openshift/api/quota/v1/types.go new file mode 100644 index 0000000000000000000000000000000000000000..1bac842c7b55c5e172fdda6b531bea5e9a0497e5 --- /dev/null +++ b/vendor/github.com/openshift/api/quota/v1/types.go @@ -0,0 +1,115 @@ +package v1 + +import ( + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +// +genclient +// +genclient:nonNamespaced +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// ClusterResourceQuota mirrors ResourceQuota at a cluster scope. This object is easily convertible to +// a synthetic ResourceQuota object to allow quota evaluation re-use. +type ClusterResourceQuota struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata" protobuf:"bytes,1,opt,name=metadata"` + + // Spec defines the desired quota + Spec ClusterResourceQuotaSpec `json:"spec" protobuf:"bytes,2,opt,name=spec"` + + // Status defines the actual enforced quota and its current usage + Status ClusterResourceQuotaStatus `json:"status,omitempty" protobuf:"bytes,3,opt,name=status"` +} + +// ClusterResourceQuotaSpec defines the desired quota restrictions +type ClusterResourceQuotaSpec struct { + // Selector is the selector used to match projects. + // It should only select active projects on the scale of dozens (though it can select + // many more less active projects). These projects will contend on object creation through + // this resource. + Selector ClusterResourceQuotaSelector `json:"selector" protobuf:"bytes,1,opt,name=selector"` + + // Quota defines the desired quota + Quota corev1.ResourceQuotaSpec `json:"quota" protobuf:"bytes,2,opt,name=quota"` +} + +// ClusterResourceQuotaSelector is used to select projects. At least one of LabelSelector or AnnotationSelector +// must be present. If only one is present, it is the only selection criterion. If both are specified, +// the project must match both restrictions. +type ClusterResourceQuotaSelector struct { + // LabelSelector is used to select projects by label. + // +optional + // +nullable + LabelSelector *metav1.LabelSelector `json:"labels" protobuf:"bytes,1,opt,name=labels"` + + // AnnotationSelector is used to select projects by annotation. + // +optional + // +nullable + AnnotationSelector map[string]string `json:"annotations" protobuf:"bytes,2,rep,name=annotations"` +} + +// ClusterResourceQuotaStatus defines the actual enforced quota and its current usage +type ClusterResourceQuotaStatus struct { + // Total defines the actual enforced quota and its current usage across all projects + Total corev1.ResourceQuotaStatus `json:"total" protobuf:"bytes,1,opt,name=total"` + + // Namespaces slices the usage by project. This division allows for quick resolution of + // deletion reconciliation inside of a single project without requiring a recalculation + // across all projects. This can be used to pull the deltas for a given project.
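+ // (The +optional and +nullable markers below are machine-readable directives for the Kubernetes API code generators, not user-facing documentation.)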
+ // +optional + // +nullable + Namespaces ResourceQuotasStatusByNamespace `json:"namespaces" protobuf:"bytes,2,rep,name=namespaces"` +} + +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// ClusterResourceQuotaList is a collection of ClusterResourceQuotas +type ClusterResourceQuotaList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` + + // Items is a list of ClusterResourceQuotas + Items []ClusterResourceQuota `json:"items" protobuf:"bytes,2,rep,name=items"` +} + +// ResourceQuotasStatusByNamespace bundles multiple ResourceQuotaStatusByNamespace +type ResourceQuotasStatusByNamespace []ResourceQuotaStatusByNamespace + +// ResourceQuotaStatusByNamespace gives status for a particular project +type ResourceQuotaStatusByNamespace struct { + // Namespace the project this status applies to + Namespace string `json:"namespace" protobuf:"bytes,1,opt,name=namespace"` + + // Status indicates how many resources have been consumed by this project + Status corev1.ResourceQuotaStatus `json:"status" protobuf:"bytes,2,opt,name=status"` +} + +// +genclient +// +genclient:onlyVerbs=get,list +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// AppliedClusterResourceQuota mirrors ClusterResourceQuota at a project scope, for projection +// into a project. It allows a project-admin to know which ClusterResourceQuotas are applied to +// his project and their associated usage. +type AppliedClusterResourceQuota struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata" protobuf:"bytes,1,opt,name=metadata"` + + // Spec defines the desired quota + Spec ClusterResourceQuotaSpec `json:"spec" protobuf:"bytes,2,opt,name=spec"` + + // Status defines the actual enforced quota and its current usage + Status ClusterResourceQuotaStatus `json:"status,omitempty" protobuf:"bytes,3,opt,name=status"` +} + +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// AppliedClusterResourceQuotaList is a collection of AppliedClusterResourceQuotas +type AppliedClusterResourceQuotaList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` + + // Items is a list of AppliedClusterResourceQuota + Items []AppliedClusterResourceQuota `json:"items" protobuf:"bytes,2,rep,name=items"` +} diff --git a/vendor/github.com/openshift/api/quota/v1/zz_generated.deepcopy.go b/vendor/github.com/openshift/api/quota/v1/zz_generated.deepcopy.go new file mode 100644 index 0000000000000000000000000000000000000000..2a3530948985fd55ef304df669fb59d4d017b074 --- /dev/null +++ b/vendor/github.com/openshift/api/quota/v1/zz_generated.deepcopy.go @@ -0,0 +1,241 @@ +// +build !ignore_autogenerated + +// Code generated by deepcopy-gen. DO NOT EDIT. + +package v1 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + runtime "k8s.io/apimachinery/pkg/runtime" +) + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *AppliedClusterResourceQuota) DeepCopyInto(out *AppliedClusterResourceQuota) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AppliedClusterResourceQuota. 
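+// A typical use is copying an object that came out of a shared informer cache before mutating it, e.g. quota = quota.DeepCopy().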
+func (in *AppliedClusterResourceQuota) DeepCopy() *AppliedClusterResourceQuota { + if in == nil { + return nil + } + out := new(AppliedClusterResourceQuota) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *AppliedClusterResourceQuota) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *AppliedClusterResourceQuotaList) DeepCopyInto(out *AppliedClusterResourceQuotaList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]AppliedClusterResourceQuota, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AppliedClusterResourceQuotaList. +func (in *AppliedClusterResourceQuotaList) DeepCopy() *AppliedClusterResourceQuotaList { + if in == nil { + return nil + } + out := new(AppliedClusterResourceQuotaList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *AppliedClusterResourceQuotaList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ClusterResourceQuota) DeepCopyInto(out *ClusterResourceQuota) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClusterResourceQuota. +func (in *ClusterResourceQuota) DeepCopy() *ClusterResourceQuota { + if in == nil { + return nil + } + out := new(ClusterResourceQuota) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *ClusterResourceQuota) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ClusterResourceQuotaList) DeepCopyInto(out *ClusterResourceQuotaList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]ClusterResourceQuota, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClusterResourceQuotaList. +func (in *ClusterResourceQuotaList) DeepCopy() *ClusterResourceQuotaList { + if in == nil { + return nil + } + out := new(ClusterResourceQuotaList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. 
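+// Together with the embedded TypeMeta, DeepCopyObject is what satisfies runtime.Object, allowing these types to be registered in a scheme and handled by client-go.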
+func (in *ClusterResourceQuotaList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ClusterResourceQuotaSelector) DeepCopyInto(out *ClusterResourceQuotaSelector) { + *out = *in + if in.LabelSelector != nil { + in, out := &in.LabelSelector, &out.LabelSelector + *out = new(metav1.LabelSelector) + (*in).DeepCopyInto(*out) + } + if in.AnnotationSelector != nil { + in, out := &in.AnnotationSelector, &out.AnnotationSelector + *out = make(map[string]string, len(*in)) + for key, val := range *in { + (*out)[key] = val + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClusterResourceQuotaSelector. +func (in *ClusterResourceQuotaSelector) DeepCopy() *ClusterResourceQuotaSelector { + if in == nil { + return nil + } + out := new(ClusterResourceQuotaSelector) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ClusterResourceQuotaSpec) DeepCopyInto(out *ClusterResourceQuotaSpec) { + *out = *in + in.Selector.DeepCopyInto(&out.Selector) + in.Quota.DeepCopyInto(&out.Quota) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClusterResourceQuotaSpec. +func (in *ClusterResourceQuotaSpec) DeepCopy() *ClusterResourceQuotaSpec { + if in == nil { + return nil + } + out := new(ClusterResourceQuotaSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ClusterResourceQuotaStatus) DeepCopyInto(out *ClusterResourceQuotaStatus) { + *out = *in + in.Total.DeepCopyInto(&out.Total) + if in.Namespaces != nil { + in, out := &in.Namespaces, &out.Namespaces + *out = make(ResourceQuotasStatusByNamespace, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClusterResourceQuotaStatus. +func (in *ClusterResourceQuotaStatus) DeepCopy() *ClusterResourceQuotaStatus { + if in == nil { + return nil + } + out := new(ClusterResourceQuotaStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ResourceQuotaStatusByNamespace) DeepCopyInto(out *ResourceQuotaStatusByNamespace) { + *out = *in + in.Status.DeepCopyInto(&out.Status) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ResourceQuotaStatusByNamespace. +func (in *ResourceQuotaStatusByNamespace) DeepCopy() *ResourceQuotaStatusByNamespace { + if in == nil { + return nil + } + out := new(ResourceQuotaStatusByNamespace) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in ResourceQuotasStatusByNamespace) DeepCopyInto(out *ResourceQuotasStatusByNamespace) { + { + in := &in + *out = make(ResourceQuotasStatusByNamespace, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + return + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ResourceQuotasStatusByNamespace. 
+func (in ResourceQuotasStatusByNamespace) DeepCopy() ResourceQuotasStatusByNamespace { + if in == nil { + return nil + } + out := new(ResourceQuotasStatusByNamespace) + in.DeepCopyInto(out) + return *out +} diff --git a/vendor/github.com/openshift/api/quota/v1/zz_generated.swagger_doc_generated.go b/vendor/github.com/openshift/api/quota/v1/zz_generated.swagger_doc_generated.go new file mode 100644 index 0000000000000000000000000000000000000000..71c0d6d56f8693b4e625fbc52ccda485bd526098 --- /dev/null +++ b/vendor/github.com/openshift/api/quota/v1/zz_generated.swagger_doc_generated.go @@ -0,0 +1,92 @@ +package v1 + +// This file contains a collection of methods that can be used from go-restful to +// generate Swagger API documentation for its models. Please read this PR for more +// information on the implementation: https://github.com/emicklei/go-restful/pull/215 +// +// TODOs are ignored from the parser (e.g. TODO(andronat):... || TODO:...) if and only if +// they are on one line! For multiple line or blocks that you want to ignore use ---. +// Any context after a --- is ignored. +// +// Those methods can be generated by using hack/update-swagger-docs.sh + +// AUTO-GENERATED FUNCTIONS START HERE +var map_AppliedClusterResourceQuota = map[string]string{ + "": "AppliedClusterResourceQuota mirrors ClusterResourceQuota at a project scope, for projection into a project. It allows a project-admin to know which ClusterResourceQuotas are applied to his project and their associated usage.", + "spec": "Spec defines the desired quota", + "status": "Status defines the actual enforced quota and its current usage", +} + +func (AppliedClusterResourceQuota) SwaggerDoc() map[string]string { + return map_AppliedClusterResourceQuota +} + +var map_AppliedClusterResourceQuotaList = map[string]string{ + "": "AppliedClusterResourceQuotaList is a collection of AppliedClusterResourceQuotas", + "items": "Items is a list of AppliedClusterResourceQuota", +} + +func (AppliedClusterResourceQuotaList) SwaggerDoc() map[string]string { + return map_AppliedClusterResourceQuotaList +} + +var map_ClusterResourceQuota = map[string]string{ + "": "ClusterResourceQuota mirrors ResourceQuota at a cluster scope. This object is easily convertible to a synthetic ResourceQuota object to allow quota evaluation re-use.", + "spec": "Spec defines the desired quota", + "status": "Status defines the actual enforced quota and its current usage", +} + +func (ClusterResourceQuota) SwaggerDoc() map[string]string { + return map_ClusterResourceQuota +} + +var map_ClusterResourceQuotaList = map[string]string{ + "": "ClusterResourceQuotaList is a collection of ClusterResourceQuotas", + "items": "Items is a list of ClusterResourceQuotas", +} + +func (ClusterResourceQuotaList) SwaggerDoc() map[string]string { + return map_ClusterResourceQuotaList +} + +var map_ClusterResourceQuotaSelector = map[string]string{ + "": "ClusterResourceQuotaSelector is used to select projects. At least one of LabelSelector or AnnotationSelector must be present. If only one is present, it is the only selection criterion.
If both are specified, the project must match both restrictions.", + "labels": "LabelSelector is used to select projects by label.", + "annotations": "AnnotationSelector is used to select projects by annotation.", +} + +func (ClusterResourceQuotaSelector) SwaggerDoc() map[string]string { + return map_ClusterResourceQuotaSelector +} + +var map_ClusterResourceQuotaSpec = map[string]string{ + "": "ClusterResourceQuotaSpec defines the desired quota restrictions", + "selector": "Selector is the selector used to match projects. It should only select active projects on the scale of dozens (though it can select many more less active projects). These projects will contend on object creation through this resource.", + "quota": "Quota defines the desired quota", +} + +func (ClusterResourceQuotaSpec) SwaggerDoc() map[string]string { + return map_ClusterResourceQuotaSpec +} + +var map_ClusterResourceQuotaStatus = map[string]string{ + "": "ClusterResourceQuotaStatus defines the actual enforced quota and its current usage", + "total": "Total defines the actual enforced quota and its current usage across all projects", + "namespaces": "Namespaces slices the usage by project. This division allows for quick resolution of deletion reconciliation inside of a single project without requiring a recalculation across all projects. This can be used to pull the deltas for a given project.", +} + +func (ClusterResourceQuotaStatus) SwaggerDoc() map[string]string { + return map_ClusterResourceQuotaStatus +} + +var map_ResourceQuotaStatusByNamespace = map[string]string{ + "": "ResourceQuotaStatusByNamespace gives status for a particular project", + "namespace": "Namespace the project this status applies to", + "status": "Status indicates how many resources have been consumed by this project", +} + +func (ResourceQuotaStatusByNamespace) SwaggerDoc() map[string]string { + return map_ResourceQuotaStatusByNamespace +} + +// AUTO-GENERATED FUNCTIONS END HERE diff --git a/vendor/github.com/openshift/api/route/OWNERS b/vendor/github.com/openshift/api/route/OWNERS new file mode 100644 index 0000000000000000000000000000000000000000..74038975d3160241da25576a3bd350dec04a5985 --- /dev/null +++ b/vendor/github.com/openshift/api/route/OWNERS @@ -0,0 +1,5 @@ +reviewers: + - ironcladlou + - knobunc + - pravisankar + - Miciah diff --git a/vendor/github.com/openshift/api/route/install.go b/vendor/github.com/openshift/api/route/install.go new file mode 100644 index 0000000000000000000000000000000000000000..a08536283be1f91922a6ae1d037e76516d6a8b54 --- /dev/null +++ b/vendor/github.com/openshift/api/route/install.go @@ -0,0 +1,26 @@ +package route + +import ( + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" + + routev1 "github.com/openshift/api/route/v1" +) + +const ( + GroupName = "route.openshift.io" +) + +var ( + schemeBuilder = runtime.NewSchemeBuilder(routev1.Install) + // Install is a function which adds every version of this group to a scheme + Install = schemeBuilder.AddToScheme +) + +func Resource(resource string) schema.GroupResource { + return schema.GroupResource{Group: GroupName, Resource: resource} +} + +func Kind(kind string) schema.GroupKind { + return schema.GroupKind{Group: GroupName, Kind: kind} +} diff --git a/vendor/github.com/openshift/api/route/v1/doc.go b/vendor/github.com/openshift/api/route/v1/doc.go new file mode 100644 index 0000000000000000000000000000000000000000..e56fbbd8d1fe3bb5448dba49edcd340bd6521a6e --- /dev/null +++ b/vendor/github.com/openshift/api/route/v1/doc.go @@ -0,0 
+1,8 @@ +// +k8s:deepcopy-gen=package,register +// +k8s:conversion-gen=github.com/openshift/origin/pkg/route/apis/route +// +k8s:defaulter-gen=TypeMeta +// +k8s:openapi-gen=true + +// +groupName=route.openshift.io +// Package v1 is the v1 version of the API. +package v1 diff --git a/vendor/github.com/openshift/api/route/v1/generated.pb.go b/vendor/github.com/openshift/api/route/v1/generated.pb.go new file mode 100644 index 0000000000000000000000000000000000000000..38543a90fdccbe99ee65ab3ee1b258aaea566c14 --- /dev/null +++ b/vendor/github.com/openshift/api/route/v1/generated.pb.go @@ -0,0 +1,3065 @@ +// Code generated by protoc-gen-gogo. DO NOT EDIT. +// source: github.com/openshift/api/route/v1/generated.proto + +package v1 + +import ( + fmt "fmt" + + io "io" + + proto "github.com/gogo/protobuf/proto" + + k8s_io_api_core_v1 "k8s.io/api/core/v1" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + + math "math" + math_bits "math/bits" + reflect "reflect" + strings "strings" +) + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.GoGoProtoPackageIsVersion2 // please upgrade the proto package + +func (m *Route) Reset() { *m = Route{} } +func (*Route) ProtoMessage() {} +func (*Route) Descriptor() ([]byte, []int) { + return fileDescriptor_373b8fa7ff738721, []int{0} +} +func (m *Route) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *Route) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *Route) XXX_Merge(src proto.Message) { + xxx_messageInfo_Route.Merge(m, src) +} +func (m *Route) XXX_Size() int { + return m.Size() +} +func (m *Route) XXX_DiscardUnknown() { + xxx_messageInfo_Route.DiscardUnknown(m) +} + +var xxx_messageInfo_Route proto.InternalMessageInfo + +func (m *RouteIngress) Reset() { *m = RouteIngress{} } +func (*RouteIngress) ProtoMessage() {} +func (*RouteIngress) Descriptor() ([]byte, []int) { + return fileDescriptor_373b8fa7ff738721, []int{1} +} +func (m *RouteIngress) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *RouteIngress) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *RouteIngress) XXX_Merge(src proto.Message) { + xxx_messageInfo_RouteIngress.Merge(m, src) +} +func (m *RouteIngress) XXX_Size() int { + return m.Size() +} +func (m *RouteIngress) XXX_DiscardUnknown() { + xxx_messageInfo_RouteIngress.DiscardUnknown(m) +} + +var xxx_messageInfo_RouteIngress proto.InternalMessageInfo + +func (m *RouteIngressCondition) Reset() { *m = RouteIngressCondition{} } +func (*RouteIngressCondition) ProtoMessage() {} +func (*RouteIngressCondition) Descriptor() ([]byte, []int) { + return fileDescriptor_373b8fa7ff738721, []int{2} +} +func (m *RouteIngressCondition) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *RouteIngressCondition) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m 
*RouteIngressCondition) XXX_Merge(src proto.Message) { + xxx_messageInfo_RouteIngressCondition.Merge(m, src) +} +func (m *RouteIngressCondition) XXX_Size() int { + return m.Size() +} +func (m *RouteIngressCondition) XXX_DiscardUnknown() { + xxx_messageInfo_RouteIngressCondition.DiscardUnknown(m) +} + +var xxx_messageInfo_RouteIngressCondition proto.InternalMessageInfo + +func (m *RouteList) Reset() { *m = RouteList{} } +func (*RouteList) ProtoMessage() {} +func (*RouteList) Descriptor() ([]byte, []int) { + return fileDescriptor_373b8fa7ff738721, []int{3} +} +func (m *RouteList) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *RouteList) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *RouteList) XXX_Merge(src proto.Message) { + xxx_messageInfo_RouteList.Merge(m, src) +} +func (m *RouteList) XXX_Size() int { + return m.Size() +} +func (m *RouteList) XXX_DiscardUnknown() { + xxx_messageInfo_RouteList.DiscardUnknown(m) +} + +var xxx_messageInfo_RouteList proto.InternalMessageInfo + +func (m *RoutePort) Reset() { *m = RoutePort{} } +func (*RoutePort) ProtoMessage() {} +func (*RoutePort) Descriptor() ([]byte, []int) { + return fileDescriptor_373b8fa7ff738721, []int{4} +} +func (m *RoutePort) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *RoutePort) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *RoutePort) XXX_Merge(src proto.Message) { + xxx_messageInfo_RoutePort.Merge(m, src) +} +func (m *RoutePort) XXX_Size() int { + return m.Size() +} +func (m *RoutePort) XXX_DiscardUnknown() { + xxx_messageInfo_RoutePort.DiscardUnknown(m) +} + +var xxx_messageInfo_RoutePort proto.InternalMessageInfo + +func (m *RouteSpec) Reset() { *m = RouteSpec{} } +func (*RouteSpec) ProtoMessage() {} +func (*RouteSpec) Descriptor() ([]byte, []int) { + return fileDescriptor_373b8fa7ff738721, []int{5} +} +func (m *RouteSpec) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *RouteSpec) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *RouteSpec) XXX_Merge(src proto.Message) { + xxx_messageInfo_RouteSpec.Merge(m, src) +} +func (m *RouteSpec) XXX_Size() int { + return m.Size() +} +func (m *RouteSpec) XXX_DiscardUnknown() { + xxx_messageInfo_RouteSpec.DiscardUnknown(m) +} + +var xxx_messageInfo_RouteSpec proto.InternalMessageInfo + +func (m *RouteStatus) Reset() { *m = RouteStatus{} } +func (*RouteStatus) ProtoMessage() {} +func (*RouteStatus) Descriptor() ([]byte, []int) { + return fileDescriptor_373b8fa7ff738721, []int{6} +} +func (m *RouteStatus) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *RouteStatus) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *RouteStatus) XXX_Merge(src proto.Message) { + xxx_messageInfo_RouteStatus.Merge(m, src) +} +func (m *RouteStatus) XXX_Size() int { + return m.Size() +} +func (m *RouteStatus) XXX_DiscardUnknown() { + xxx_messageInfo_RouteStatus.DiscardUnknown(m) +} + +var xxx_messageInfo_RouteStatus proto.InternalMessageInfo + +func (m *RouteTargetReference) 
Reset() { *m = RouteTargetReference{} } +func (*RouteTargetReference) ProtoMessage() {} +func (*RouteTargetReference) Descriptor() ([]byte, []int) { + return fileDescriptor_373b8fa7ff738721, []int{7} +} +func (m *RouteTargetReference) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *RouteTargetReference) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *RouteTargetReference) XXX_Merge(src proto.Message) { + xxx_messageInfo_RouteTargetReference.Merge(m, src) +} +func (m *RouteTargetReference) XXX_Size() int { + return m.Size() +} +func (m *RouteTargetReference) XXX_DiscardUnknown() { + xxx_messageInfo_RouteTargetReference.DiscardUnknown(m) +} + +var xxx_messageInfo_RouteTargetReference proto.InternalMessageInfo + +func (m *RouterShard) Reset() { *m = RouterShard{} } +func (*RouterShard) ProtoMessage() {} +func (*RouterShard) Descriptor() ([]byte, []int) { + return fileDescriptor_373b8fa7ff738721, []int{8} +} +func (m *RouterShard) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *RouterShard) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *RouterShard) XXX_Merge(src proto.Message) { + xxx_messageInfo_RouterShard.Merge(m, src) +} +func (m *RouterShard) XXX_Size() int { + return m.Size() +} +func (m *RouterShard) XXX_DiscardUnknown() { + xxx_messageInfo_RouterShard.DiscardUnknown(m) +} + +var xxx_messageInfo_RouterShard proto.InternalMessageInfo + +func (m *TLSConfig) Reset() { *m = TLSConfig{} } +func (*TLSConfig) ProtoMessage() {} +func (*TLSConfig) Descriptor() ([]byte, []int) { + return fileDescriptor_373b8fa7ff738721, []int{9} +} +func (m *TLSConfig) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *TLSConfig) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *TLSConfig) XXX_Merge(src proto.Message) { + xxx_messageInfo_TLSConfig.Merge(m, src) +} +func (m *TLSConfig) XXX_Size() int { + return m.Size() +} +func (m *TLSConfig) XXX_DiscardUnknown() { + xxx_messageInfo_TLSConfig.DiscardUnknown(m) +} + +var xxx_messageInfo_TLSConfig proto.InternalMessageInfo + +func init() { + proto.RegisterType((*Route)(nil), "github.com.openshift.api.route.v1.Route") + proto.RegisterType((*RouteIngress)(nil), "github.com.openshift.api.route.v1.RouteIngress") + proto.RegisterType((*RouteIngressCondition)(nil), "github.com.openshift.api.route.v1.RouteIngressCondition") + proto.RegisterType((*RouteList)(nil), "github.com.openshift.api.route.v1.RouteList") + proto.RegisterType((*RoutePort)(nil), "github.com.openshift.api.route.v1.RoutePort") + proto.RegisterType((*RouteSpec)(nil), "github.com.openshift.api.route.v1.RouteSpec") + proto.RegisterType((*RouteStatus)(nil), "github.com.openshift.api.route.v1.RouteStatus") + proto.RegisterType((*RouteTargetReference)(nil), "github.com.openshift.api.route.v1.RouteTargetReference") + proto.RegisterType((*RouterShard)(nil), "github.com.openshift.api.route.v1.RouterShard") + proto.RegisterType((*TLSConfig)(nil), "github.com.openshift.api.route.v1.TLSConfig") +} + +func init() { + proto.RegisterFile("github.com/openshift/api/route/v1/generated.proto", fileDescriptor_373b8fa7ff738721) +} + +var 
fileDescriptor_373b8fa7ff738721 = []byte{ + // 1163 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xa4, 0x55, 0x4f, 0x6f, 0x1b, 0x45, + 0x14, 0xcf, 0xfa, 0x5f, 0xe2, 0x71, 0x1b, 0xc8, 0x40, 0xa9, 0x1b, 0x29, 0x76, 0xba, 0x07, 0x94, + 0xa2, 0xb2, 0x4b, 0x42, 0x81, 0x4a, 0x88, 0x43, 0x9d, 0x22, 0x48, 0xe3, 0xa4, 0xd1, 0xd8, 0xa2, + 0xa2, 0xea, 0x81, 0xc9, 0xee, 0x78, 0x3d, 0xd8, 0x9e, 0x5d, 0x66, 0xc6, 0x29, 0xbe, 0xa0, 0x4a, + 0x7c, 0x81, 0xf2, 0x6d, 0xb8, 0x73, 0xc9, 0xb1, 0xc7, 0x1e, 0x90, 0x45, 0xcc, 0x91, 0x6f, 0x90, + 0x13, 0x9a, 0xd9, 0xb1, 0x77, 0xed, 0x38, 0xa9, 0x0b, 0xb7, 0xdd, 0xf7, 0x7e, 0xbf, 0xdf, 0x7b, + 0xf3, 0xde, 0x9b, 0x37, 0x60, 0x3b, 0xa0, 0xb2, 0xdd, 0x3f, 0x76, 0xbc, 0xb0, 0xe7, 0x86, 0x11, + 0x61, 0xa2, 0x4d, 0x5b, 0xd2, 0xc5, 0x11, 0x75, 0x79, 0xd8, 0x97, 0xc4, 0x3d, 0xd9, 0x76, 0x03, + 0xc2, 0x08, 0xc7, 0x92, 0xf8, 0x4e, 0xc4, 0x43, 0x19, 0xc2, 0xdb, 0x09, 0xc5, 0x99, 0x50, 0x1c, + 0x1c, 0x51, 0x47, 0x53, 0x9c, 0x93, 0xed, 0xf5, 0x8f, 0x53, 0xaa, 0x41, 0x18, 0x84, 0xae, 0x66, + 0x1e, 0xf7, 0x5b, 0xfa, 0x4f, 0xff, 0xe8, 0xaf, 0x58, 0x71, 0xdd, 0xee, 0xdc, 0x17, 0x0e, 0x0d, + 0x75, 0x58, 0x2f, 0xe4, 0xf3, 0xa2, 0xae, 0xdf, 0x4b, 0x30, 0x3d, 0xec, 0xb5, 0x29, 0x23, 0x7c, + 0xe0, 0x46, 0x9d, 0x40, 0x19, 0x84, 0xdb, 0x23, 0x12, 0xcf, 0x63, 0x7d, 0x7e, 0x19, 0x8b, 0xf7, + 0x99, 0xa4, 0x3d, 0xe2, 0x0a, 0xaf, 0x4d, 0x7a, 0xf8, 0x02, 0xef, 0xd3, 0xcb, 0x78, 0x7d, 0x49, + 0xbb, 0x2e, 0x65, 0x52, 0x48, 0x3e, 0x4b, 0xb2, 0x7f, 0xcb, 0x80, 0x3c, 0x52, 0x25, 0x80, 0x3f, + 0x80, 0x15, 0x95, 0x91, 0x8f, 0x25, 0x2e, 0x5b, 0x9b, 0xd6, 0x56, 0x69, 0xe7, 0x13, 0x27, 0x56, + 0x74, 0xd2, 0x8a, 0x4e, 0xd4, 0x09, 0x94, 0x41, 0x38, 0x0a, 0xed, 0x9c, 0x6c, 0x3b, 0x8f, 0x8f, + 0x7f, 0x24, 0x9e, 0x3c, 0x20, 0x12, 0xd7, 0xe0, 0xe9, 0xb0, 0xba, 0x34, 0x1a, 0x56, 0x41, 0x62, + 0x43, 0x13, 0x55, 0x78, 0x08, 0x72, 0x22, 0x22, 0x5e, 0x39, 0xa3, 0xd5, 0xef, 0x3a, 0x6f, 0xec, + 0x89, 0xa3, 0x33, 0x6b, 0x44, 0xc4, 0xab, 0x5d, 0x33, 0xca, 0x39, 0xf5, 0x87, 0xb4, 0x0e, 0xfc, + 0x0e, 0x14, 0x84, 0xc4, 0xb2, 0x2f, 0xca, 0x59, 0xad, 0xe8, 0x2c, 0xac, 0xa8, 0x59, 0xb5, 0x55, + 0xa3, 0x59, 0x88, 0xff, 0x91, 0x51, 0xb3, 0x7f, 0xcd, 0x82, 0x6b, 0x1a, 0xb7, 0xc7, 0x02, 0x4e, + 0x84, 0x80, 0x9b, 0x20, 0xd7, 0x0e, 0x85, 0xd4, 0x65, 0x29, 0x26, 0xa9, 0x7c, 0x1b, 0x0a, 0x89, + 0xb4, 0x07, 0xee, 0x00, 0xa0, 0x43, 0xf0, 0x43, 0xdc, 0x23, 0xfa, 0x80, 0xc5, 0xa4, 0x18, 0x68, + 0xe2, 0x41, 0x29, 0x14, 0xec, 0x02, 0xe0, 0x85, 0xcc, 0xa7, 0x92, 0x86, 0x4c, 0x1d, 0x21, 0xbb, + 0x55, 0xda, 0xb9, 0xbf, 0xe8, 0x11, 0x4c, 0x6a, 0xbb, 0x63, 0x81, 0x24, 0xda, 0xc4, 0x24, 0x50, + 0x4a, 0x1f, 0x36, 0xc1, 0xea, 0x73, 0xda, 0xf5, 0x3d, 0xcc, 0xfd, 0xa3, 0xb0, 0x4b, 0xbd, 0x41, + 0x39, 0xa7, 0xb3, 0xbc, 0x6b, 0x78, 0xab, 0x4f, 0xa6, 0xbc, 0xe7, 0xc3, 0x2a, 0x9c, 0xb6, 0x34, + 0x07, 0x11, 0x41, 0x33, 0x1a, 0xf0, 0x7b, 0x70, 0x33, 0x3e, 0xd1, 0x2e, 0x66, 0x21, 0xa3, 0x1e, + 0xee, 0xaa, 0xa2, 0x30, 0x55, 0x84, 0xbc, 0x96, 0xaf, 0x1a, 0xf9, 0x9b, 0x68, 0x3e, 0x0c, 0x5d, + 0xc6, 0xb7, 0xff, 0xc9, 0x80, 0x1b, 0x73, 0x8f, 0x0a, 0xbf, 0x02, 0x39, 0x39, 0x88, 0x88, 0x69, + 0xc7, 0x9d, 0x71, 0x3b, 0x54, 0x82, 0xe7, 0xc3, 0xea, 0xad, 0xb9, 0x24, 0x9d, 0xbd, 0xa6, 0xc1, + 0xfa, 0x64, 0x6c, 0xe2, 0x3e, 0xdd, 0x9b, 0x1e, 0x83, 0xf3, 0x61, 0x75, 0xce, 0xdd, 0x76, 0x26, + 0x4a, 0xd3, 0xc3, 0x02, 0x3f, 0x04, 0x05, 0x4e, 0xb0, 0x08, 0x99, 0x1e, 0xc2, 0x62, 0x32, 0x54, + 0x48, 0x5b, 0x91, 0xf1, 0xc2, 0x3b, 0x60, 0xb9, 0x47, 0x84, 0xc0, 0x01, 0x31, 0x85, 0x7f, 0xc7, + 0x00, 0x97, 0x0f, 0x62, 0x33, 
0x1a, 0xfb, 0x21, 0x07, 0xb0, 0x8b, 0x85, 0x6c, 0x72, 0xcc, 0x44, + 0x9c, 0x3c, 0x35, 0xf5, 0x2c, 0xed, 0x7c, 0xb4, 0xd8, 0x9d, 0x54, 0x8c, 0xda, 0x07, 0xa3, 0x61, + 0x15, 0xd6, 0x2f, 0x28, 0xa1, 0x39, 0xea, 0xf6, 0xef, 0x16, 0x28, 0xea, 0xc2, 0xd5, 0xa9, 0x90, + 0xf0, 0xd9, 0x85, 0x5d, 0xe0, 0x2c, 0x16, 0x57, 0xb1, 0xf5, 0x26, 0x78, 0xd7, 0x9c, 0x6e, 0x65, + 0x6c, 0x49, 0xed, 0x81, 0x03, 0x90, 0xa7, 0x92, 0xf4, 0x54, 0xfd, 0xd5, 0xcc, 0x6f, 0x2d, 0x3a, + 0xf3, 0xb5, 0xeb, 0x46, 0x34, 0xbf, 0xa7, 0xe8, 0x28, 0x56, 0xb1, 0x7f, 0x32, 0x99, 0x1f, 0x85, + 0x5c, 0x42, 0x1f, 0x00, 0x89, 0x79, 0x40, 0xa4, 0xfa, 0x7b, 0xe3, 0x1e, 0x53, 0x9b, 0xd1, 0x89, + 0x37, 0xa3, 0xb3, 0xc7, 0xe4, 0x63, 0xde, 0x90, 0x9c, 0xb2, 0x20, 0xb9, 0x4c, 0xcd, 0x89, 0x16, + 0x4a, 0xe9, 0xda, 0x7f, 0xe4, 0x4c, 0x4c, 0xb5, 0x8d, 0x16, 0x58, 0x0f, 0x2e, 0x28, 0x8a, 0xfe, + 0xb1, 0x1f, 0xf6, 0x30, 0x65, 0xe5, 0x15, 0x0d, 0x5b, 0x33, 0xb0, 0x62, 0x63, 0xec, 0x40, 0x09, + 0x46, 0x49, 0x46, 0x58, 0xb6, 0xcd, 0x84, 0x4e, 0x24, 0x8f, 0xb0, 0x6c, 0x23, 0xed, 0x81, 0x0d, + 0x90, 0x91, 0xa1, 0x59, 0x7c, 0x5f, 0x2c, 0x5a, 0xc1, 0xf8, 0x38, 0x88, 0xb4, 0x08, 0x27, 0xcc, + 0x23, 0x35, 0x60, 0x84, 0x33, 0xcd, 0x10, 0x65, 0x64, 0x08, 0x5f, 0x58, 0x60, 0x0d, 0x77, 0x25, + 0xe1, 0x0c, 0x4b, 0x52, 0xc3, 0x5e, 0x87, 0x30, 0x5f, 0x94, 0x73, 0xba, 0x4d, 0xff, 0x39, 0xc8, + 0x2d, 0x13, 0x64, 0xed, 0xc1, 0xac, 0x32, 0xba, 0x18, 0x0c, 0x3e, 0x02, 0xb9, 0x48, 0xb5, 0x2e, + 0xff, 0x76, 0x8f, 0x84, 0x6a, 0x4b, 0x6d, 0x45, 0xd7, 0x48, 0x35, 0x4b, 0x6b, 0xc0, 0x6f, 0x40, + 0x56, 0x76, 0x45, 0xb9, 0xb0, 0xb0, 0x54, 0xb3, 0xde, 0xd8, 0x0d, 0x59, 0x8b, 0x06, 0xb5, 0xe5, + 0xd1, 0xb0, 0x9a, 0x6d, 0xd6, 0x1b, 0x48, 0x29, 0xcc, 0x59, 0x9e, 0xcb, 0xff, 0x7f, 0x79, 0xda, + 0x14, 0x94, 0x52, 0xcf, 0x11, 0x7c, 0x0a, 0x96, 0x69, 0xbc, 0xb5, 0xca, 0x96, 0xae, 0xb8, 0xfb, + 0x96, 0x8f, 0x41, 0xb2, 0x52, 0x8c, 0x01, 0x8d, 0x05, 0xed, 0x5f, 0xc0, 0xfb, 0xf3, 0x7a, 0xa3, + 0xe6, 0xac, 0x43, 0x99, 0x3f, 0x3b, 0xba, 0xfb, 0x94, 0xf9, 0x48, 0x7b, 0x14, 0x82, 0x25, 0x6f, + 0xda, 0x04, 0xa1, 0x5f, 0x33, 0xed, 0x81, 0x36, 0x28, 0x3c, 0x27, 0x34, 0x68, 0x4b, 0x3d, 0x8d, + 0xf9, 0x1a, 0x50, 0xdb, 0xef, 0x89, 0xb6, 0x20, 0xe3, 0xb1, 0x43, 0x73, 0x54, 0xde, 0x68, 0x63, + 0xee, 0xeb, 0xfb, 0xa0, 0x3e, 0xf4, 0x6b, 0x69, 0xcd, 0xdc, 0x87, 0xb1, 0x03, 0x25, 0x18, 0x45, + 0xf0, 0x99, 0x68, 0xf4, 0x5b, 0x2d, 0xfa, 0xb3, 0x49, 0x65, 0x42, 0x78, 0x78, 0xd8, 0x88, 0x1d, + 0x28, 0xc1, 0xd8, 0x7f, 0x66, 0x41, 0x71, 0xd2, 0x4d, 0xb8, 0x0f, 0x4a, 0x92, 0xf0, 0x1e, 0x65, + 0x58, 0x2d, 0xbc, 0x99, 0x87, 0xa3, 0xd4, 0x4c, 0x5c, 0xaa, 0x73, 0xcd, 0x7a, 0x23, 0x65, 0xd1, + 0x9d, 0x4b, 0xb3, 0xe1, 0x67, 0xa0, 0xe4, 0x11, 0x2e, 0x69, 0x8b, 0x7a, 0x58, 0x8e, 0x0b, 0xf3, + 0xde, 0x58, 0x6c, 0x37, 0x71, 0xa1, 0x34, 0x0e, 0x6e, 0x80, 0x6c, 0x87, 0x0c, 0xcc, 0x2b, 0x51, + 0x32, 0xf0, 0xec, 0x3e, 0x19, 0x20, 0x65, 0x87, 0x5f, 0x82, 0xeb, 0x1e, 0x4e, 0x91, 0xcd, 0x2b, + 0x71, 0xc3, 0x00, 0xaf, 0xef, 0x3e, 0x48, 0x2b, 0x4f, 0x63, 0xe1, 0x33, 0x50, 0xf6, 0x89, 0x90, + 0x26, 0xc3, 0x29, 0xa8, 0x79, 0x87, 0x37, 0x8d, 0x4e, 0xf9, 0xe1, 0x25, 0x38, 0x74, 0xa9, 0x02, + 0x7c, 0x69, 0x81, 0x0d, 0xca, 0x04, 0xf1, 0xfa, 0x9c, 0x7c, 0xed, 0x07, 0x24, 0x55, 0x1d, 0x73, + 0x1b, 0x0a, 0x3a, 0xc6, 0x23, 0x13, 0x63, 0x63, 0xef, 0x2a, 0xf0, 0xf9, 0xb0, 0x7a, 0xfb, 0x4a, + 0x80, 0xae, 0xf8, 0xd5, 0x01, 0x6b, 0x5b, 0xa7, 0x67, 0x95, 0xa5, 0x57, 0x67, 0x95, 0xa5, 0xd7, + 0x67, 0x95, 0xa5, 0x17, 0xa3, 0x8a, 0x75, 0x3a, 0xaa, 0x58, 0xaf, 0x46, 0x15, 0xeb, 0xf5, 0xa8, + 0x62, 0xfd, 0x35, 0xaa, 0x58, 0x2f, 0xff, 0xae, 0x2c, 
0x3d, 0xcd, 0x9c, 0x6c, 0xff, 0x1b, 0x00, + 0x00, 0xff, 0xff, 0x26, 0x8b, 0x83, 0xf6, 0x2d, 0x0c, 0x00, 0x00, +} + +func (m *Route) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Route) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *Route) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + { + size, err := m.Status.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a + { + size, err := m.Spec.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + { + size, err := m.ObjectMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *RouteIngress) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *RouteIngress) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *RouteIngress) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + i -= len(m.RouterCanonicalHostname) + copy(dAtA[i:], m.RouterCanonicalHostname) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.RouterCanonicalHostname))) + i-- + dAtA[i] = 0x2a + i -= len(m.WildcardPolicy) + copy(dAtA[i:], m.WildcardPolicy) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.WildcardPolicy))) + i-- + dAtA[i] = 0x22 + if len(m.Conditions) > 0 { + for iNdEx := len(m.Conditions) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Conditions[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a + } + } + i -= len(m.RouterName) + copy(dAtA[i:], m.RouterName) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.RouterName))) + i-- + dAtA[i] = 0x12 + i -= len(m.Host) + copy(dAtA[i:], m.Host) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Host))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *RouteIngressCondition) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *RouteIngressCondition) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *RouteIngressCondition) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.LastTransitionTime != nil { + { + size, err := m.LastTransitionTime.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x2a + } + i -= len(m.Message) + copy(dAtA[i:], m.Message) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Message))) + i-- + dAtA[i] = 0x22 + i -= len(m.Reason) + copy(dAtA[i:], m.Reason) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Reason))) + i-- + 
dAtA[i] = 0x1a + i -= len(m.Status) + copy(dAtA[i:], m.Status) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Status))) + i-- + dAtA[i] = 0x12 + i -= len(m.Type) + copy(dAtA[i:], m.Type) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Type))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *RouteList) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *RouteList) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *RouteList) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.Items) > 0 { + for iNdEx := len(m.Items) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Items[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + } + { + size, err := m.ListMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *RoutePort) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *RoutePort) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *RoutePort) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + { + size, err := m.TargetPort.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *RouteSpec) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *RouteSpec) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *RouteSpec) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + i -= len(m.Subdomain) + copy(dAtA[i:], m.Subdomain) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Subdomain))) + i-- + dAtA[i] = 0x42 + i -= len(m.WildcardPolicy) + copy(dAtA[i:], m.WildcardPolicy) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.WildcardPolicy))) + i-- + dAtA[i] = 0x3a + if m.TLS != nil { + { + size, err := m.TLS.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x32 + } + if m.Port != nil { + { + size, err := m.Port.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x2a + } + if len(m.AlternateBackends) > 0 { + for iNdEx := len(m.AlternateBackends) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.AlternateBackends[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x22 + } + } + { + size, err := m.To.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + 
i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a + i -= len(m.Path) + copy(dAtA[i:], m.Path) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Path))) + i-- + dAtA[i] = 0x12 + i -= len(m.Host) + copy(dAtA[i:], m.Host) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Host))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *RouteStatus) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *RouteStatus) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *RouteStatus) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.Ingress) > 0 { + for iNdEx := len(m.Ingress) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Ingress[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + } + } + return len(dAtA) - i, nil +} + +func (m *RouteTargetReference) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *RouteTargetReference) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *RouteTargetReference) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.Weight != nil { + i = encodeVarintGenerated(dAtA, i, uint64(*m.Weight)) + i-- + dAtA[i] = 0x18 + } + i -= len(m.Name) + copy(dAtA[i:], m.Name) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Name))) + i-- + dAtA[i] = 0x12 + i -= len(m.Kind) + copy(dAtA[i:], m.Kind) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Kind))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *RouterShard) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *RouterShard) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *RouterShard) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + i -= len(m.DNSSuffix) + copy(dAtA[i:], m.DNSSuffix) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.DNSSuffix))) + i-- + dAtA[i] = 0x12 + i -= len(m.ShardName) + copy(dAtA[i:], m.ShardName) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.ShardName))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *TLSConfig) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *TLSConfig) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *TLSConfig) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + i -= len(m.InsecureEdgeTerminationPolicy) + copy(dAtA[i:], m.InsecureEdgeTerminationPolicy) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.InsecureEdgeTerminationPolicy))) + i-- + dAtA[i] = 0x32 + i -= len(m.DestinationCACertificate) + copy(dAtA[i:], 
m.DestinationCACertificate) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.DestinationCACertificate))) + i-- + dAtA[i] = 0x2a + i -= len(m.CACertificate) + copy(dAtA[i:], m.CACertificate) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.CACertificate))) + i-- + dAtA[i] = 0x22 + i -= len(m.Key) + copy(dAtA[i:], m.Key) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Key))) + i-- + dAtA[i] = 0x1a + i -= len(m.Certificate) + copy(dAtA[i:], m.Certificate) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Certificate))) + i-- + dAtA[i] = 0x12 + i -= len(m.Termination) + copy(dAtA[i:], m.Termination) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Termination))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func encodeVarintGenerated(dAtA []byte, offset int, v uint64) int { + offset -= sovGenerated(v) + base := offset + for v >= 1<<7 { + dAtA[offset] = uint8(v&0x7f | 0x80) + v >>= 7 + offset++ + } + dAtA[offset] = uint8(v) + return base +} +func (m *Route) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.ObjectMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = m.Spec.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = m.Status.Size() + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *RouteIngress) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Host) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.RouterName) + n += 1 + l + sovGenerated(uint64(l)) + if len(m.Conditions) > 0 { + for _, e := range m.Conditions { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + l = len(m.WildcardPolicy) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.RouterCanonicalHostname) + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *RouteIngressCondition) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Type) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Status) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Reason) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Message) + n += 1 + l + sovGenerated(uint64(l)) + if m.LastTransitionTime != nil { + l = m.LastTransitionTime.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + return n +} + +func (m *RouteList) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.ListMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + if len(m.Items) > 0 { + for _, e := range m.Items { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m *RoutePort) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.TargetPort.Size() + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *RouteSpec) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Host) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Path) + n += 1 + l + sovGenerated(uint64(l)) + l = m.To.Size() + n += 1 + l + sovGenerated(uint64(l)) + if len(m.AlternateBackends) > 0 { + for _, e := range m.AlternateBackends { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + if m.Port != nil { + l = m.Port.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if m.TLS != nil { + l = m.TLS.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + l = len(m.WildcardPolicy) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Subdomain) + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *RouteStatus) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.Ingress) > 0 { + for _, e := range m.Ingress { + l = e.Size() + n += 1 + l 
+ sovGenerated(uint64(l)) + } + } + return n +} + +func (m *RouteTargetReference) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Kind) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Name) + n += 1 + l + sovGenerated(uint64(l)) + if m.Weight != nil { + n += 1 + sovGenerated(uint64(*m.Weight)) + } + return n +} + +func (m *RouterShard) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.ShardName) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.DNSSuffix) + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *TLSConfig) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Termination) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Certificate) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Key) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.CACertificate) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.DestinationCACertificate) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.InsecureEdgeTerminationPolicy) + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func sovGenerated(x uint64) (n int) { + return (math_bits.Len64(x|1) + 6) / 7 +} +func sozGenerated(x uint64) (n int) { + return sovGenerated(uint64((x << 1) ^ uint64((int64(x) >> 63)))) +} +func (this *Route) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&Route{`, + `ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v1.ObjectMeta", 1), `&`, ``, 1) + `,`, + `Spec:` + strings.Replace(strings.Replace(this.Spec.String(), "RouteSpec", "RouteSpec", 1), `&`, ``, 1) + `,`, + `Status:` + strings.Replace(strings.Replace(this.Status.String(), "RouteStatus", "RouteStatus", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *RouteIngress) String() string { + if this == nil { + return "nil" + } + repeatedStringForConditions := "[]RouteIngressCondition{" + for _, f := range this.Conditions { + repeatedStringForConditions += strings.Replace(strings.Replace(f.String(), "RouteIngressCondition", "RouteIngressCondition", 1), `&`, ``, 1) + "," + } + repeatedStringForConditions += "}" + s := strings.Join([]string{`&RouteIngress{`, + `Host:` + fmt.Sprintf("%v", this.Host) + `,`, + `RouterName:` + fmt.Sprintf("%v", this.RouterName) + `,`, + `Conditions:` + repeatedStringForConditions + `,`, + `WildcardPolicy:` + fmt.Sprintf("%v", this.WildcardPolicy) + `,`, + `RouterCanonicalHostname:` + fmt.Sprintf("%v", this.RouterCanonicalHostname) + `,`, + `}`, + }, "") + return s +} +func (this *RouteIngressCondition) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&RouteIngressCondition{`, + `Type:` + fmt.Sprintf("%v", this.Type) + `,`, + `Status:` + fmt.Sprintf("%v", this.Status) + `,`, + `Reason:` + fmt.Sprintf("%v", this.Reason) + `,`, + `Message:` + fmt.Sprintf("%v", this.Message) + `,`, + `LastTransitionTime:` + strings.Replace(fmt.Sprintf("%v", this.LastTransitionTime), "Time", "v1.Time", 1) + `,`, + `}`, + }, "") + return s +} +func (this *RouteList) String() string { + if this == nil { + return "nil" + } + repeatedStringForItems := "[]Route{" + for _, f := range this.Items { + repeatedStringForItems += strings.Replace(strings.Replace(f.String(), "Route", "Route", 1), `&`, ``, 1) + "," + } + repeatedStringForItems += "}" + s := strings.Join([]string{`&RouteList{`, + `ListMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ListMeta), "ListMeta", "v1.ListMeta", 1), `&`, ``, 1) + `,`, + `Items:` 
+ repeatedStringForItems + `,`, + `}`, + }, "") + return s +} +func (this *RoutePort) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&RoutePort{`, + `TargetPort:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.TargetPort), "IntOrString", "intstr.IntOrString", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *RouteSpec) String() string { + if this == nil { + return "nil" + } + repeatedStringForAlternateBackends := "[]RouteTargetReference{" + for _, f := range this.AlternateBackends { + repeatedStringForAlternateBackends += strings.Replace(strings.Replace(f.String(), "RouteTargetReference", "RouteTargetReference", 1), `&`, ``, 1) + "," + } + repeatedStringForAlternateBackends += "}" + s := strings.Join([]string{`&RouteSpec{`, + `Host:` + fmt.Sprintf("%v", this.Host) + `,`, + `Path:` + fmt.Sprintf("%v", this.Path) + `,`, + `To:` + strings.Replace(strings.Replace(this.To.String(), "RouteTargetReference", "RouteTargetReference", 1), `&`, ``, 1) + `,`, + `AlternateBackends:` + repeatedStringForAlternateBackends + `,`, + `Port:` + strings.Replace(this.Port.String(), "RoutePort", "RoutePort", 1) + `,`, + `TLS:` + strings.Replace(this.TLS.String(), "TLSConfig", "TLSConfig", 1) + `,`, + `WildcardPolicy:` + fmt.Sprintf("%v", this.WildcardPolicy) + `,`, + `Subdomain:` + fmt.Sprintf("%v", this.Subdomain) + `,`, + `}`, + }, "") + return s +} +func (this *RouteStatus) String() string { + if this == nil { + return "nil" + } + repeatedStringForIngress := "[]RouteIngress{" + for _, f := range this.Ingress { + repeatedStringForIngress += strings.Replace(strings.Replace(f.String(), "RouteIngress", "RouteIngress", 1), `&`, ``, 1) + "," + } + repeatedStringForIngress += "}" + s := strings.Join([]string{`&RouteStatus{`, + `Ingress:` + repeatedStringForIngress + `,`, + `}`, + }, "") + return s +} +func (this *RouteTargetReference) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&RouteTargetReference{`, + `Kind:` + fmt.Sprintf("%v", this.Kind) + `,`, + `Name:` + fmt.Sprintf("%v", this.Name) + `,`, + `Weight:` + valueToStringGenerated(this.Weight) + `,`, + `}`, + }, "") + return s +} +func (this *RouterShard) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&RouterShard{`, + `ShardName:` + fmt.Sprintf("%v", this.ShardName) + `,`, + `DNSSuffix:` + fmt.Sprintf("%v", this.DNSSuffix) + `,`, + `}`, + }, "") + return s +} +func (this *TLSConfig) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&TLSConfig{`, + `Termination:` + fmt.Sprintf("%v", this.Termination) + `,`, + `Certificate:` + fmt.Sprintf("%v", this.Certificate) + `,`, + `Key:` + fmt.Sprintf("%v", this.Key) + `,`, + `CACertificate:` + fmt.Sprintf("%v", this.CACertificate) + `,`, + `DestinationCACertificate:` + fmt.Sprintf("%v", this.DestinationCACertificate) + `,`, + `InsecureEdgeTerminationPolicy:` + fmt.Sprintf("%v", this.InsecureEdgeTerminationPolicy) + `,`, + `}`, + }, "") + return s +} +func valueToStringGenerated(v interface{}) string { + rv := reflect.ValueOf(v) + if rv.IsNil() { + return "nil" + } + pv := reflect.Indirect(rv).Interface() + return fmt.Sprintf("*%v", pv) +} +func (m *Route) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + 
wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Route: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Route: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ObjectMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Spec", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Spec.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Status", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Status.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *RouteIngress) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: RouteIngress: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: RouteIngress: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Host", wireType) + 
} + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Host = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field RouterName", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.RouterName = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Conditions", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Conditions = append(m.Conditions, RouteIngressCondition{}) + if err := m.Conditions[len(m.Conditions)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field WildcardPolicy", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.WildcardPolicy = WildcardPolicyType(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field RouterCanonicalHostname", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.RouterCanonicalHostname = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := 
skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *RouteIngressCondition) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: RouteIngressCondition: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: RouteIngressCondition: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Type", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Type = RouteIngressConditionType(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Status", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Status = k8s_io_api_core_v1.ConditionStatus(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Reason", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Reason = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Message", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + 
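+ // The loop above decoded a varint length prefix: seven payload bits per byte,
+ // with a cleared high bit marking the final byte. The string bytes follow immediately.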
intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Message = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field LastTransitionTime", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.LastTransitionTime == nil { + m.LastTransitionTime = &v1.Time{} + } + if err := m.LastTransitionTime.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *RouteList) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: RouteList: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: RouteList: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ListMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ListMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Items = append(m.Items, Route{}) + if err := m.Items[len(m.Items)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + 
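+ // Field numbers this decoder does not recognize fall through to the default
+ // branch below, where skipGenerated advances past them, so older decoders
+ // tolerate fields added by newer writers.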
default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *RoutePort) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: RoutePort: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: RoutePort: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field TargetPort", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.TargetPort.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *RouteSpec) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: RouteSpec: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: RouteSpec: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Host", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Host = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong 
wireType = %d for field Path", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Path = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field To", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.To.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field AlternateBackends", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.AlternateBackends = append(m.AlternateBackends, RouteTargetReference{}) + if err := m.AlternateBackends[len(m.AlternateBackends)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Port", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Port == nil { + m.Port = &RoutePort{} + } + if err := m.Port.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 6: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field TLS", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.TLS == nil { + m.TLS = &TLSConfig{} + } + if err := m.TLS.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 7: + if wireType != 2 
{ + return fmt.Errorf("proto: wrong wireType = %d for field WildcardPolicy", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.WildcardPolicy = WildcardPolicyType(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 8: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Subdomain", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Subdomain = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *RouteStatus) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: RouteStatus: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: RouteStatus: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Ingress", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Ingress = append(m.Ingress, RouteIngress{}) + if err := m.Ingress[len(m.Ingress)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return 
io.ErrUnexpectedEOF + } + return nil +} +func (m *RouteTargetReference) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: RouteTargetReference: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: RouteTargetReference: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Kind", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Kind = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Name = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Weight", wireType) + } + var v int32 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int32(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.Weight = &v + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *RouterShard) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: RouterShard: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: RouterShard: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch 
fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ShardName", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ShardName = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field DNSSuffix", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.DNSSuffix = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *TLSConfig) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: TLSConfig: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: TLSConfig: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Termination", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Termination = TLSTerminationType(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Certificate", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + 
} + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Certificate = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Key", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Key = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field CACertificate", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.CACertificate = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field DestinationCACertificate", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.DestinationCACertificate = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 6: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field InsecureEdgeTerminationPolicy", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.InsecureEdgeTerminationPolicy = InsecureEdgeTerminationPolicyType(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return 
io.ErrUnexpectedEOF
+	}
+	return nil
+}
+func skipGenerated(dAtA []byte) (n int, err error) {
+	l := len(dAtA)
+	iNdEx := 0
+	for iNdEx < l {
+		var wire uint64
+		for shift := uint(0); ; shift += 7 {
+			if shift >= 64 {
+				return 0, ErrIntOverflowGenerated
+			}
+			if iNdEx >= l {
+				return 0, io.ErrUnexpectedEOF
+			}
+			b := dAtA[iNdEx]
+			iNdEx++
+			wire |= (uint64(b) & 0x7F) << shift
+			if b < 0x80 {
+				break
+			}
+		}
+		wireType := int(wire & 0x7)
+		switch wireType {
+		case 0:
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return 0, ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return 0, io.ErrUnexpectedEOF
+				}
+				iNdEx++
+				if dAtA[iNdEx-1] < 0x80 {
+					break
+				}
+			}
+			return iNdEx, nil
+		case 1:
+			iNdEx += 8
+			return iNdEx, nil
+		case 2:
+			var length int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return 0, ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return 0, io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				length |= (int(b) & 0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if length < 0 {
+				return 0, ErrInvalidLengthGenerated
+			}
+			iNdEx += length
+			if iNdEx < 0 {
+				return 0, ErrInvalidLengthGenerated
+			}
+			return iNdEx, nil
+		case 3:
+			for {
+				var innerWire uint64
+				var start int = iNdEx
+				for shift := uint(0); ; shift += 7 {
+					if shift >= 64 {
+						return 0, ErrIntOverflowGenerated
+					}
+					if iNdEx >= l {
+						return 0, io.ErrUnexpectedEOF
+					}
+					b := dAtA[iNdEx]
+					iNdEx++
+					innerWire |= (uint64(b) & 0x7F) << shift
+					if b < 0x80 {
+						break
+					}
+				}
+				innerWireType := int(innerWire & 0x7)
+				if innerWireType == 4 {
+					break
+				}
+				next, err := skipGenerated(dAtA[start:])
+				if err != nil {
+					return 0, err
+				}
+				iNdEx = start + next
+				if iNdEx < 0 {
+					return 0, ErrInvalidLengthGenerated
+				}
+			}
+			return iNdEx, nil
+		case 4:
+			return iNdEx, nil
+		case 5:
+			iNdEx += 4
+			return iNdEx, nil
+		default:
+			return 0, fmt.Errorf("proto: illegal wireType %d", wireType)
+		}
+	}
+	panic("unreachable")
+}
+
+var (
+	ErrInvalidLengthGenerated = fmt.Errorf("proto: negative length found during unmarshaling")
+	ErrIntOverflowGenerated   = fmt.Errorf("proto: integer overflow")
+)
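For orientation only (this note and sketch are not part of the diff): the generated Marshal/Unmarshal pair above round-trips a Route through the wire format; the host and service names here are hypothetical.

	weight := int32(100)
	in := &Route{
		Spec: RouteSpec{
			Host: "www.example.com",
			To:   RouteTargetReference{Kind: "Service", Name: "frontend", Weight: &weight},
		},
	}
	data, err := in.Marshal() // allocates via in.Size(), then fills the buffer back-to-front
	if err != nil {
		panic(err)
	}
	out := &Route{}
	if err := out.Unmarshal(data); err != nil { // walks the tag/length-prefixed fields
		panic(err)
	}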
diff --git a/vendor/github.com/openshift/api/route/v1/generated.proto b/vendor/github.com/openshift/api/route/v1/generated.proto
new file mode 100644
index 0000000000000000000000000000000000000000..24476e1a3c854ba91d2679255d47ed6f21afd8ae
--- /dev/null
+++ b/vendor/github.com/openshift/api/route/v1/generated.proto
@@ -0,0 +1,233 @@
+
+// This file was autogenerated by go-to-protobuf. Do not edit it manually!
+
+syntax = 'proto2';
+
+package github.com.openshift.api.route.v1;
+
+import "k8s.io/api/core/v1/generated.proto";
+import "k8s.io/apimachinery/pkg/apis/meta/v1/generated.proto";
+import "k8s.io/apimachinery/pkg/runtime/schema/generated.proto";
+import "k8s.io/apimachinery/pkg/util/intstr/generated.proto";
+
+// Package-wide variables from generator "generated".
+option go_package = "v1";
+
+// A route allows developers to expose services through an HTTP(S) aware load balancing and proxy
+// layer via a public DNS entry. The route may further specify TLS options and a certificate, or
+// specify a public CNAME that the router should also accept for HTTP and HTTPS traffic. An
+// administrator typically configures their router to be visible outside the cluster firewall, and
+// may also add additional security, caching, or traffic controls on the service content. Routers
+// usually talk directly to the service endpoints.
+//
+// Once a route is created, the `host` field may not be changed. Generally, routers use the oldest
+// route with a given host when resolving conflicts.
+//
+// Routers are subject to additional customization and may support additional controls via the
+// annotations field.
+//
+// Because administrators may configure multiple routers, the route status field is used to
+// return information to clients about the names and states of the route under each router.
+// If a client chooses a duplicate name, for instance, the route status conditions are used
+// to indicate the route cannot be chosen.
+message Route {
+  optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1;
+
+  // spec is the desired state of the route
+  optional RouteSpec spec = 2;
+
+  // status is the current state of the route
+  // +optional
+  optional RouteStatus status = 3;
+}
+
+// RouteIngress holds information about the places where a route is exposed.
+message RouteIngress {
+  // Host is the host string under which the route is exposed; this value is required
+  optional string host = 1;
+
+  // Name is a name chosen by the router to identify itself; this value is required
+  optional string routerName = 2;
+
+  // Conditions is the state of the route, may be empty.
+  repeated RouteIngressCondition conditions = 3;
+
+  // Wildcard policy is the wildcard policy that was allowed where this route is exposed.
+  optional string wildcardPolicy = 4;
+
+  // CanonicalHostname is the external host name for the router that can be used as a CNAME
+  // for the host requested for this route. This value is optional and may not be set in all cases.
+  optional string routerCanonicalHostname = 5;
+}
+
+// RouteIngressCondition contains details for the current condition of this route on a particular
+// router.
+message RouteIngressCondition {
+  // Type is the type of the condition.
+  // Currently only Ready.
+  optional string type = 1;
+
+  // Status is the status of the condition.
+  // Can be True, False, Unknown.
+  optional string status = 2;
+
+  // (brief) reason for the condition's last transition, and is usually a machine and human
+  // readable constant
+  optional string reason = 3;
+
+  // Human readable message indicating details about last transition.
+  optional string message = 4;
+
+  // RFC 3339 date and time when this condition last transitioned
+  optional k8s.io.apimachinery.pkg.apis.meta.v1.Time lastTransitionTime = 5;
+}
+
+// RouteList is a collection of Routes.
+message RouteList {
+  optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1;
+
+  // items is a list of routes
+  repeated Route items = 2;
+}
+
+// RoutePort defines a port mapping from a router to an endpoint in the service endpoints.
+message RoutePort {
+  // The target port on pods selected by the service this route points to.
+  // If this is a string, it will be looked up as a named port in the target
+  // endpoints port list. Required
+  optional k8s.io.apimachinery.pkg.util.intstr.IntOrString targetPort = 1;
+}
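A quick sketch (not part of the diff) of the two forms targetPort accepts on the Go side, assuming the intstr helpers from k8s.io/apimachinery/pkg/util/intstr:

	// A named port is resolved against the service's endpoints port list;
	// a numeric port is used as-is.
	byName := RoutePort{TargetPort: intstr.FromString("https")}
	byNumber := RoutePort{TargetPort: intstr.FromInt(8443)}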
+
+// RouteSpec describes the hostname or path the route exposes, any security information,
+// and one to four backends (services) the route points to. Requests are distributed
+// among the backends depending on the weights assigned to each backend. When using
+// roundrobin scheduling the portion of requests that go to each backend is the backend
+// weight divided by the sum of all of the backend weights. When the backend has more than
+// one endpoint the requests that end up on the backend are roundrobin distributed among
+// the endpoints. Weights are between 0 and 256 with default 100. Weight 0 causes no requests
+// to the backend. If all weights are zero the route will be considered to have no backends
+// and return a standard 503 response.
+//
+// The `tls` field is optional and allows specific certificates or behavior for the
+// route. Routers typically configure a default certificate on a wildcard domain to
+// terminate routes without explicit certificates, but custom hostnames usually must
+// choose passthrough (send traffic directly to the backend via the TLS Server-Name-
+// Indication field) or provide a certificate.
+message RouteSpec {
+  // host is an alias/DNS that points to the service. Optional.
+  // If not specified a route name will typically be automatically
+  // chosen.
+  // Must follow DNS952 subdomain conventions.
+  // +optional
+  optional string host = 1;
+
+  // subdomain is a DNS subdomain that is requested within the ingress controller's
+  // domain (as a subdomain). If host is set this field is ignored. An ingress
+  // controller may choose to ignore this suggested name, in which case the controller
+  // will report the assigned name in the status.ingress array or refuse to admit the
+  // route. If this value is set and the server does not support this field host will
+  // be populated automatically. Otherwise host is left empty. The field may have
+  // multiple parts separated by a dot, but not all ingress controllers may honor
+  // the request. This field may not be changed after creation except by a user with
+  // the update routes/custom-host permission.
+  //
+  // Example: subdomain `frontend` automatically receives the router subdomain
+  // `apps.mycluster.com` to have a full hostname `frontend.apps.mycluster.com`.
+  //
+  // +optional
+  optional string subdomain = 8;
+
+  // path that the router watches to route traffic to the service. Optional
+  optional string path = 2;
+
+  // to is an object the route should use as the primary backend. Only the Service kind
+  // is allowed, and it will be defaulted to Service. If the weight field (0-256 default 100)
+  // is set to zero, no traffic will be sent to this backend.
+  optional RouteTargetReference to = 3;
+
+  // alternateBackends allows up to 3 additional backends to be assigned to the route.
+  // Only the Service kind is allowed, and it will be defaulted to Service.
+  // Use the weight field in RouteTargetReference object to specify relative preference.
+  repeated RouteTargetReference alternateBackends = 4;
+
+  // If specified, the port to be used by the router. Most routers will use all
+  // endpoints exposed by the service by default - set this value to instruct routers
+  // which port to use.
+  optional RoutePort port = 5;
+
+  // The tls field provides the ability to configure certificates and termination for the route.
+  optional TLSConfig tls = 6;
+
+  // Wildcard policy if any for the route.
+  // Currently only 'Subdomain' or 'None' is allowed.
+  optional string wildcardPolicy = 7;
+}
+
+// RouteStatus provides relevant info about the status of a route, including which routers
+// acknowledge it.
+message RouteStatus {
+  // ingress describes the places where the route may be exposed. The list of
+  // ingress points may contain duplicate Host or RouterName values. Routes
+  // are considered live once they are `Ready`.
+  repeated RouteIngress ingress = 1;
+}
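The weight arithmetic described in the RouteSpec comment is easy to sanity-check in Go; this sketch (not part of the diff, backend names hypothetical, assumes fmt is imported) computes each backend's share as its weight divided by the sum of all weights:

	weights := map[string]int32{"frontend-v1": 100, "frontend-v2": 50}
	var sum int32
	for _, w := range weights {
		sum += w
	}
	for name, w := range weights {
		// e.g. frontend-v1 -> 67%, frontend-v2 -> 33%
		fmt.Printf("%s receives %.0f%% of requests\n", name, 100*float64(w)/float64(sum))
	}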
+message RouteTargetReference { + // The kind of target that the route is referring to. Currently, only 'Service' is allowed + optional string kind = 1; + + // name of the service/target that is being referred to. e.g. name of the service + optional string name = 2; + + // weight as an integer between 0 and 256, default 100, that specifies the target's relative weight + // against other target reference objects. 0 suppresses requests to this backend. + // +optional + optional int32 weight = 3; +} + +// RouterShard has information of a routing shard and is used to +// generate host names and routing table entries when a routing shard is +// allocated for a specific route. +// Caveat: This is WIP and will likely undergo modifications when sharding +// support is added. +message RouterShard { + // shardName uniquely identifies a router shard in the "set" of + // routers used for routing traffic to the services. + optional string shardName = 1; + + // dnsSuffix for the shard ala: shard-1.v3.openshift.com + optional string dnsSuffix = 2; +} + +// TLSConfig defines config used to secure a route and provide termination +message TLSConfig { + // termination indicates termination type. + optional string termination = 1; + + // certificate provides certificate contents + optional string certificate = 2; + + // key provides key file contents + optional string key = 3; + + // caCertificate provides the cert authority certificate contents + optional string caCertificate = 4; + + // destinationCACertificate provides the contents of the ca certificate of the final destination. When using reencrypt + // termination this file should be provided in order to have routers use it for health checks on the secure connection. + // If this field is not specified, the router may provide its own destination CA and perform hostname validation using + // the short service name (service.namespace.svc), which allows infrastructure generated certificates to automatically + // verify. + optional string destinationCACertificate = 5; + + // insecureEdgeTerminationPolicy indicates the desired behavior for insecure connections to a route. While + // each router may make its own decisions on which ports to expose, this is normally port 80. + // + // * Allow - traffic is sent to the server on the insecure port (default) + // * Disable - no traffic is allowed on the insecure port. + // * Redirect - clients are redirected to the secure port. + optional string insecureEdgeTerminationPolicy = 6; +} + diff --git a/vendor/github.com/openshift/api/route/v1/legacy.go b/vendor/github.com/openshift/api/route/v1/legacy.go new file mode 100644 index 0000000000000000000000000000000000000000..498f5dd0f0604ece5d777bbc29f812d7f27219fa --- /dev/null +++ b/vendor/github.com/openshift/api/route/v1/legacy.go @@ -0,0 +1,22 @@ +package v1 + +import ( + corev1 "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" +) + +var ( + legacyGroupVersion = schema.GroupVersion{Group: "", Version: "v1"} + legacySchemeBuilder = runtime.NewSchemeBuilder(addLegacyKnownTypes, corev1.AddToScheme) + DeprecatedInstallWithoutGroup = legacySchemeBuilder.AddToScheme +) + +func addLegacyKnownTypes(scheme *runtime.Scheme) error { + types := []runtime.Object{ + &Route{}, + &RouteList{}, + } + scheme.AddKnownTypes(legacyGroupVersion, types...) 
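+	// Registering the kinds above under the empty ("legacy") group keeps
+	// compatibility with clients that still request the pre-API-group "v1"
+	// resources; new code should use Install from register.go instead.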
+ return nil +} diff --git a/vendor/github.com/openshift/api/route/v1/register.go b/vendor/github.com/openshift/api/route/v1/register.go new file mode 100644 index 0000000000000000000000000000000000000000..6f99ef5c96ae4b9563030cd097dcb379970d6249 --- /dev/null +++ b/vendor/github.com/openshift/api/route/v1/register.go @@ -0,0 +1,39 @@ +package v1 + +import ( + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" +) + +var ( + GroupName = "route.openshift.io" + GroupVersion = schema.GroupVersion{Group: GroupName, Version: "v1"} + schemeBuilder = runtime.NewSchemeBuilder(addKnownTypes, corev1.AddToScheme) + // Install is a function which adds this version to a scheme + Install = schemeBuilder.AddToScheme + + // SchemeGroupVersion generated code relies on this name + // Deprecated + SchemeGroupVersion = GroupVersion + // AddToScheme exists solely to keep the old generators creating valid code + // DEPRECATED + AddToScheme = schemeBuilder.AddToScheme +) + +// Resource generated code relies on this being here, but it logically belongs to the group +// DEPRECATED +func Resource(resource string) schema.GroupResource { + return schema.GroupResource{Group: GroupName, Resource: resource} +} + +// Adds the list of known types to api.Scheme. +func addKnownTypes(scheme *runtime.Scheme) error { + scheme.AddKnownTypes(GroupVersion, + &Route{}, + &RouteList{}, + ) + metav1.AddToGroupVersion(scheme, GroupVersion) + return nil +} diff --git a/vendor/github.com/openshift/api/route/v1/types.go b/vendor/github.com/openshift/api/route/v1/types.go new file mode 100644 index 0000000000000000000000000000000000000000..9c59fd413e1cc82536e2b466f408ba0838a8c79f --- /dev/null +++ b/vendor/github.com/openshift/api/route/v1/types.go @@ -0,0 +1,272 @@ +package v1 + +import ( + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/util/intstr" +) + +// +genclient +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// A route allows developers to expose services through an HTTP(S) aware load balancing and proxy +// layer via a public DNS entry. The route may further specify TLS options and a certificate, or +// specify a public CNAME that the router should also accept for HTTP and HTTPS traffic. An +// administrator typically configures their router to be visible outside the cluster firewall, and +// may also add additional security, caching, or traffic controls on the service content. Routers +// usually talk directly to the service endpoints. +// +// Once a route is created, the `host` field may not be changed. Generally, routers use the oldest +// route with a given host when resolving conflicts. +// +// Routers are subject to additional customization and may support additional controls via the +// annotations field. +// +// Because administrators may configure multiple routers, the route status field is used to +// return information to clients about the names and states of the route under each router. +// If a client chooses a duplicate name, for instance, the route status conditions are used +// to indicate the route cannot be chosen. 
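
As a bridge before the Route type below: register.go above follows the standard apimachinery scheme-builder pattern, and a minimal sketch of how its installers are typically consumed looks like this (the import alias is illustrative).

package main

import (
	"fmt"

	routev1 "github.com/openshift/api/route/v1"
	"k8s.io/apimachinery/pkg/runtime"
)

func main() {
	scheme := runtime.NewScheme()
	// Install registers the route.openshift.io/v1 kinds (Route, RouteList)
	// plus the core v1 types the scheme builder chains in.
	if err := routev1.Install(scheme); err != nil {
		panic(err)
	}
	// The deprecated legacy installer additionally registers the same kinds
	// under the ungrouped legacy "v1" GroupVersion from legacy.go.
	if err := routev1.DeprecatedInstallWithoutGroup(scheme); err != nil {
		panic(err)
	}
	fmt.Println(scheme.IsGroupRegistered("route.openshift.io")) // true
}
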
+type Route struct {
+	metav1.TypeMeta `json:",inline"`
+	metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"`
+
+	// spec is the desired state of the route
+	Spec RouteSpec `json:"spec" protobuf:"bytes,2,opt,name=spec"`
+	// status is the current state of the route
+	// +optional
+	Status RouteStatus `json:"status" protobuf:"bytes,3,opt,name=status"`
+}
+
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+
+// RouteList is a collection of Routes.
+type RouteList struct {
+	metav1.TypeMeta `json:",inline"`
+	metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"`
+
+	// items is a list of routes
+	Items []Route `json:"items" protobuf:"bytes,2,rep,name=items"`
+}
+
+// RouteSpec describes the hostname or path the route exposes, any security information,
+// and one to four backends (services) the route points to. Requests are distributed
+// among the backends depending on the weights assigned to each backend. When using
+// roundrobin scheduling the portion of requests that go to each backend is the backend
+// weight divided by the sum of all of the backend weights. When the backend has more than
+// one endpoint the requests that end up on the backend are roundrobin distributed among
+// the endpoints. Weights are between 0 and 256 with default 100. Weight 0 causes no requests
+// to the backend. If all weights are zero the route will be considered to have no backends
+// and return a standard 503 response.
+//
+// The `tls` field is optional and allows specific certificates or behavior for the
+// route. Routers typically configure a default certificate on a wildcard domain to
+// terminate routes without explicit certificates, but custom hostnames usually must
+// choose passthrough (send traffic directly to the backend via the TLS
+// Server-Name-Indication field) or provide a certificate.
+type RouteSpec struct {
+	// host is an alias/DNS that points to the service. Optional.
+	// If not specified, a route name will typically be automatically
+	// chosen.
+	// Must follow DNS952 subdomain conventions.
+	// +optional
+	Host string `json:"host" protobuf:"bytes,1,opt,name=host"`
+	// subdomain is a DNS subdomain that is requested within the ingress controller's
+	// domain (as a subdomain). If host is set this field is ignored. An ingress
+	// controller may choose to ignore this suggested name, in which case the controller
+	// will report the assigned name in the status.ingress array or refuse to admit the
+	// route. If this value is set and the server does not support this field, host will
+	// be populated automatically. Otherwise host is left empty. The field may have
+	// multiple parts separated by a dot, but not all ingress controllers may honor
+	// the request. This field may not be changed after creation except by a user with
+	// the update routes/custom-host permission.
+	//
+	// Example: subdomain `frontend` automatically receives the router subdomain
+	// `apps.mycluster.com` to have a full hostname `frontend.apps.mycluster.com`.
+	//
+	// +optional
+	Subdomain string `json:"subdomain,omitempty" protobuf:"bytes,8,opt,name=subdomain"`
+
+	// path that the router watches for, to route traffic to the service. Optional
+	Path string `json:"path,omitempty" protobuf:"bytes,2,opt,name=path"`
+
+	// to is an object the route should use as the primary backend. Only the Service kind
+	// is allowed, and it will be defaulted to Service. If the weight field (0-256, default 100)
+	// is set to zero, no traffic will be sent to this backend.
+	To RouteTargetReference `json:"to" protobuf:"bytes,3,opt,name=to"`
+
+	// alternateBackends allows up to 3 additional backends to be assigned to the route.
+	// Only the Service kind is allowed, and it will be defaulted to Service.
+	// Use the weight field in the RouteTargetReference object to specify relative preference.
+	AlternateBackends []RouteTargetReference `json:"alternateBackends,omitempty" protobuf:"bytes,4,rep,name=alternateBackends"`
+
+	// If specified, the port to be used by the router. Most routers will use all
+	// endpoints exposed by the service by default - set this value to instruct routers
+	// which port to use.
+	Port *RoutePort `json:"port,omitempty" protobuf:"bytes,5,opt,name=port"`
+
+	// The tls field provides the ability to configure certificates and termination for the route.
+	TLS *TLSConfig `json:"tls,omitempty" protobuf:"bytes,6,opt,name=tls"`
+
+	// Wildcard policy if any for the route.
+	// Currently only 'Subdomain' or 'None' is allowed.
+	WildcardPolicy WildcardPolicyType `json:"wildcardPolicy,omitempty" protobuf:"bytes,7,opt,name=wildcardPolicy"`
+}
+
+// RouteTargetReference specifies the target that resolves into endpoints. Only the 'Service'
+// kind is allowed. Use the 'weight' field to emphasize one over others.
+type RouteTargetReference struct {
+	// The kind of target that the route is referring to. Currently, only 'Service' is allowed
+	Kind string `json:"kind" protobuf:"bytes,1,opt,name=kind"`
+
+	// name of the service/target that is being referred to. e.g. name of the service
+	Name string `json:"name" protobuf:"bytes,2,opt,name=name"`
+
+	// weight as an integer between 0 and 256, default 100, that specifies the target's relative weight
+	// against other target reference objects. 0 suppresses requests to this backend.
+	// +optional
+	Weight *int32 `json:"weight" protobuf:"varint,3,opt,name=weight"`
+}
+
+// RoutePort defines a port mapping from a router to an endpoint in the service endpoints.
+type RoutePort struct {
+	// The target port on pods selected by the service this route points to.
+	// If this is a string, it will be looked up as a named port in the target
+	// endpoints port list. Required
+	TargetPort intstr.IntOrString `json:"targetPort" protobuf:"bytes,1,opt,name=targetPort"`
+}
+
+// RouteStatus provides relevant info about the status of a route, including which routers
+// acknowledge it.
+type RouteStatus struct {
+	// ingress describes the places where the route may be exposed. The list of
+	// ingress points may contain duplicate Host or RouterName values. Routes
+	// are considered live once they are `Ready`
+	Ingress []RouteIngress `json:"ingress" protobuf:"bytes,1,rep,name=ingress"`
+}
+
+// RouteIngress holds information about the places where a route is exposed.
+type RouteIngress struct {
+	// Host is the host string under which the route is exposed; this value is required
+	Host string `json:"host,omitempty" protobuf:"bytes,1,opt,name=host"`
+	// Name is a name chosen by the router to identify itself; this value is required
+	RouterName string `json:"routerName,omitempty" protobuf:"bytes,2,opt,name=routerName"`
+	// Conditions is the state of the route, may be empty.
+	Conditions []RouteIngressCondition `json:"conditions,omitempty" protobuf:"bytes,3,rep,name=conditions"`
+	// Wildcard policy is the wildcard policy that was allowed where this route is exposed.
+	WildcardPolicy WildcardPolicyType `json:"wildcardPolicy,omitempty" protobuf:"bytes,4,opt,name=wildcardPolicy"`
+	// CanonicalHostname is the external host name for the router that can be used as a CNAME
+	// for the host requested for this route. This value is optional and may not be set in all cases.
+	RouterCanonicalHostname string `json:"routerCanonicalHostname,omitempty" protobuf:"bytes,5,opt,name=routerCanonicalHostname"`
+}
+
+// RouteIngressConditionType is a valid value for RouteIngressCondition.Type
+type RouteIngressConditionType string
+
+// These are valid conditions of a route.
+const (
+	// RouteAdmitted means the route is able to service requests for the provided Host
+	RouteAdmitted RouteIngressConditionType = "Admitted"
+	// TODO: add other route condition types
+)
+
+// RouteIngressCondition contains details for the current condition of this route on a particular
+// router.
+type RouteIngressCondition struct {
+	// Type is the type of the condition.
+	// Currently only Admitted.
+	Type RouteIngressConditionType `json:"type" protobuf:"bytes,1,opt,name=type,casttype=RouteIngressConditionType"`
+	// Status is the status of the condition.
+	// Can be True, False, Unknown.
+	Status corev1.ConditionStatus `json:"status" protobuf:"bytes,2,opt,name=status,casttype=k8s.io/api/core/v1.ConditionStatus"`
+	// (brief) reason for the condition's last transition; usually a machine and human
+	// readable constant
+	Reason string `json:"reason,omitempty" protobuf:"bytes,3,opt,name=reason"`
+	// Human readable message indicating details about the last transition.
+	Message string `json:"message,omitempty" protobuf:"bytes,4,opt,name=message"`
+	// RFC 3339 date and time when this condition last transitioned
+	LastTransitionTime *metav1.Time `json:"lastTransitionTime,omitempty" protobuf:"bytes,5,opt,name=lastTransitionTime"`
+}
+
+// RouterShard holds information about a routing shard and is used to
+// generate host names and routing table entries when a routing shard is
+// allocated for a specific route.
+// Caveat: This is WIP and will likely undergo modifications when sharding
+// support is added.
+type RouterShard struct {
+	// shardName uniquely identifies a router shard in the "set" of
+	// routers used for routing traffic to the services.
+	ShardName string `json:"shardName" protobuf:"bytes,1,opt,name=shardName"`
+
+	// dnsSuffix for the shard ala: shard-1.v3.openshift.com
+	DNSSuffix string `json:"dnsSuffix" protobuf:"bytes,2,opt,name=dnsSuffix"`
+}
+
+// TLSConfig defines config used to secure a route and provide termination
+type TLSConfig struct {
+	// termination indicates termination type.
+	Termination TLSTerminationType `json:"termination" protobuf:"bytes,1,opt,name=termination,casttype=TLSTerminationType"`
+
+	// certificate provides certificate contents
+	Certificate string `json:"certificate,omitempty" protobuf:"bytes,2,opt,name=certificate"`
+
+	// key provides key file contents
+	Key string `json:"key,omitempty" protobuf:"bytes,3,opt,name=key"`
+
+	// caCertificate provides the cert authority certificate contents
+	CACertificate string `json:"caCertificate,omitempty" protobuf:"bytes,4,opt,name=caCertificate"`
+
+	// destinationCACertificate provides the contents of the ca certificate of the final destination. When using reencrypt
+	// termination this file should be provided in order to have routers use it for health checks on the secure connection.
+ // If this field is not specified, the router may provide its own destination CA and perform hostname validation using + // the short service name (service.namespace.svc), which allows infrastructure generated certificates to automatically + // verify. + DestinationCACertificate string `json:"destinationCACertificate,omitempty" protobuf:"bytes,5,opt,name=destinationCACertificate"` + + // insecureEdgeTerminationPolicy indicates the desired behavior for insecure connections to a route. While + // each router may make its own decisions on which ports to expose, this is normally port 80. + // + // * Allow - traffic is sent to the server on the insecure port (default) + // * Disable - no traffic is allowed on the insecure port. + // * Redirect - clients are redirected to the secure port. + InsecureEdgeTerminationPolicy InsecureEdgeTerminationPolicyType `json:"insecureEdgeTerminationPolicy,omitempty" protobuf:"bytes,6,opt,name=insecureEdgeTerminationPolicy,casttype=InsecureEdgeTerminationPolicyType"` +} + +// TLSTerminationType dictates where the secure communication will stop +// TODO: Reconsider this type in v2 +type TLSTerminationType string + +// InsecureEdgeTerminationPolicyType dictates the behavior of insecure +// connections to an edge-terminated route. +type InsecureEdgeTerminationPolicyType string + +const ( + // TLSTerminationEdge terminate encryption at the edge router. + TLSTerminationEdge TLSTerminationType = "edge" + // TLSTerminationPassthrough terminate encryption at the destination, the destination is responsible for decrypting traffic + TLSTerminationPassthrough TLSTerminationType = "passthrough" + // TLSTerminationReencrypt terminate encryption at the edge router and re-encrypt it with a new certificate supplied by the destination + TLSTerminationReencrypt TLSTerminationType = "reencrypt" + + // InsecureEdgeTerminationPolicyNone disables insecure connections for an edge-terminated route. + InsecureEdgeTerminationPolicyNone InsecureEdgeTerminationPolicyType = "None" + // InsecureEdgeTerminationPolicyAllow allows insecure connections for an edge-terminated route. + InsecureEdgeTerminationPolicyAllow InsecureEdgeTerminationPolicyType = "Allow" + // InsecureEdgeTerminationPolicyRedirect redirects insecure connections for an edge-terminated route. + // As an example, for routers that support HTTP and HTTPS, the + // insecure HTTP connections will be redirected to use HTTPS. + InsecureEdgeTerminationPolicyRedirect InsecureEdgeTerminationPolicyType = "Redirect" +) + +// WildcardPolicyType indicates the type of wildcard support needed by routes. +type WildcardPolicyType string + +const ( + // WildcardPolicyNone indicates no wildcard support is needed. + WildcardPolicyNone WildcardPolicyType = "None" + + // WildcardPolicySubdomain indicates the host needs wildcard support for the subdomain. + // Example: For host = "www.acme.test", indicates that the router + // should support requests for *.acme.test + // Note that this will not match acme.test only *.acme.test + WildcardPolicySubdomain WildcardPolicyType = "Subdomain" +) diff --git a/vendor/github.com/openshift/api/route/v1/zz_generated.deepcopy.go b/vendor/github.com/openshift/api/route/v1/zz_generated.deepcopy.go new file mode 100644 index 0000000000000000000000000000000000000000..a9576c414ca563dcdc80fd98840faa443744065a --- /dev/null +++ b/vendor/github.com/openshift/api/route/v1/zz_generated.deepcopy.go @@ -0,0 +1,240 @@ +// +build !ignore_autogenerated + +// Code generated by deepcopy-gen. DO NOT EDIT. 
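
Before the generated deep-copy helpers, a short illustrative sketch of how the termination and insecure-policy constants above combine in practice (the field values are chosen arbitrarily):

package main

import (
	"fmt"

	routev1 "github.com/openshift/api/route/v1"
)

func main() {
	// Edge termination: TLS ends at the router, and plain-HTTP requests
	// are redirected to the secure port per the insecure policy. The
	// certificate/key fields may be omitted to fall back to the router's
	// default wildcard certificate, as described above.
	tls := &routev1.TLSConfig{
		Termination:                   routev1.TLSTerminationEdge,
		InsecureEdgeTerminationPolicy: routev1.InsecureEdgeTerminationPolicyRedirect,
	}
	fmt.Println(tls.Termination, tls.InsecureEdgeTerminationPolicy) // edge Redirect
}
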
+ +package v1 + +import ( + runtime "k8s.io/apimachinery/pkg/runtime" +) + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *Route) DeepCopyInto(out *Route) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Route. +func (in *Route) DeepCopy() *Route { + if in == nil { + return nil + } + out := new(Route) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *Route) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *RouteIngress) DeepCopyInto(out *RouteIngress) { + *out = *in + if in.Conditions != nil { + in, out := &in.Conditions, &out.Conditions + *out = make([]RouteIngressCondition, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RouteIngress. +func (in *RouteIngress) DeepCopy() *RouteIngress { + if in == nil { + return nil + } + out := new(RouteIngress) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *RouteIngressCondition) DeepCopyInto(out *RouteIngressCondition) { + *out = *in + if in.LastTransitionTime != nil { + in, out := &in.LastTransitionTime, &out.LastTransitionTime + *out = (*in).DeepCopy() + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RouteIngressCondition. +func (in *RouteIngressCondition) DeepCopy() *RouteIngressCondition { + if in == nil { + return nil + } + out := new(RouteIngressCondition) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *RouteList) DeepCopyInto(out *RouteList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]Route, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RouteList. +func (in *RouteList) DeepCopy() *RouteList { + if in == nil { + return nil + } + out := new(RouteList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *RouteList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *RoutePort) DeepCopyInto(out *RoutePort) { + *out = *in + out.TargetPort = in.TargetPort + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RoutePort. 
+func (in *RoutePort) DeepCopy() *RoutePort { + if in == nil { + return nil + } + out := new(RoutePort) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *RouteSpec) DeepCopyInto(out *RouteSpec) { + *out = *in + in.To.DeepCopyInto(&out.To) + if in.AlternateBackends != nil { + in, out := &in.AlternateBackends, &out.AlternateBackends + *out = make([]RouteTargetReference, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Port != nil { + in, out := &in.Port, &out.Port + *out = new(RoutePort) + **out = **in + } + if in.TLS != nil { + in, out := &in.TLS, &out.TLS + *out = new(TLSConfig) + **out = **in + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RouteSpec. +func (in *RouteSpec) DeepCopy() *RouteSpec { + if in == nil { + return nil + } + out := new(RouteSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *RouteStatus) DeepCopyInto(out *RouteStatus) { + *out = *in + if in.Ingress != nil { + in, out := &in.Ingress, &out.Ingress + *out = make([]RouteIngress, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RouteStatus. +func (in *RouteStatus) DeepCopy() *RouteStatus { + if in == nil { + return nil + } + out := new(RouteStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *RouteTargetReference) DeepCopyInto(out *RouteTargetReference) { + *out = *in + if in.Weight != nil { + in, out := &in.Weight, &out.Weight + *out = new(int32) + **out = **in + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RouteTargetReference. +func (in *RouteTargetReference) DeepCopy() *RouteTargetReference { + if in == nil { + return nil + } + out := new(RouteTargetReference) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *RouterShard) DeepCopyInto(out *RouterShard) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RouterShard. +func (in *RouterShard) DeepCopy() *RouterShard { + if in == nil { + return nil + } + out := new(RouterShard) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *TLSConfig) DeepCopyInto(out *TLSConfig) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TLSConfig. 
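
One usage note on these generated helpers: DeepCopy exists so callers can mutate a copy without aliasing the original, since nested pointers and slices are cloned recursively. A minimal sketch, assuming the route/v1 package above:

package main

import (
	"fmt"

	routev1 "github.com/openshift/api/route/v1"
)

func main() {
	orig := &routev1.Route{}
	orig.Spec.TLS = &routev1.TLSConfig{Termination: routev1.TLSTerminationEdge}

	// DeepCopy allocates a fresh TLSConfig, so editing the clone cannot
	// leak back into the original.
	clone := orig.DeepCopy()
	clone.Spec.TLS.Termination = routev1.TLSTerminationReencrypt

	fmt.Println(orig.Spec.TLS.Termination, clone.Spec.TLS.Termination) // edge reencrypt
}
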
+func (in *TLSConfig) DeepCopy() *TLSConfig { + if in == nil { + return nil + } + out := new(TLSConfig) + in.DeepCopyInto(out) + return out +} diff --git a/vendor/github.com/openshift/api/route/v1/zz_generated.swagger_doc_generated.go b/vendor/github.com/openshift/api/route/v1/zz_generated.swagger_doc_generated.go new file mode 100644 index 0000000000000000000000000000000000000000..c0fc24b6530c7e84f71224337539c29c691d830a --- /dev/null +++ b/vendor/github.com/openshift/api/route/v1/zz_generated.swagger_doc_generated.go @@ -0,0 +1,128 @@ +package v1 + +// This file contains a collection of methods that can be used from go-restful to +// generate Swagger API documentation for its models. Please read this PR for more +// information on the implementation: https://github.com/emicklei/go-restful/pull/215 +// +// TODOs are ignored from the parser (e.g. TODO(andronat):... || TODO:...) if and only if +// they are on one line! For multiple line or blocks that you want to ignore use ---. +// Any context after a --- is ignored. +// +// Those methods can be generated by using hack/update-swagger-docs.sh + +// AUTO-GENERATED FUNCTIONS START HERE +var map_Route = map[string]string{ + "": "A route allows developers to expose services through an HTTP(S) aware load balancing and proxy layer via a public DNS entry. The route may further specify TLS options and a certificate, or specify a public CNAME that the router should also accept for HTTP and HTTPS traffic. An administrator typically configures their router to be visible outside the cluster firewall, and may also add additional security, caching, or traffic controls on the service content. Routers usually talk directly to the service endpoints.\n\nOnce a route is created, the `host` field may not be changed. Generally, routers use the oldest route with a given host when resolving conflicts.\n\nRouters are subject to additional customization and may support additional controls via the annotations field.\n\nBecause administrators may configure multiple routers, the route status field is used to return information to clients about the names and states of the route under each router. If a client chooses a duplicate name, for instance, the route status conditions are used to indicate the route cannot be chosen.", + "spec": "spec is the desired state of the route", + "status": "status is the current state of the route", +} + +func (Route) SwaggerDoc() map[string]string { + return map_Route +} + +var map_RouteIngress = map[string]string{ + "": "RouteIngress holds information about the places where a route is exposed.", + "host": "Host is the host string under which the route is exposed; this value is required", + "routerName": "Name is a name chosen by the router to identify itself; this value is required", + "conditions": "Conditions is the state of the route, may be empty.", + "wildcardPolicy": "Wildcard policy is the wildcard policy that was allowed where this route is exposed.", + "routerCanonicalHostname": "CanonicalHostname is the external host name for the router that can be used as a CNAME for the host requested for this route. This value is optional and may not be set in all cases.", +} + +func (RouteIngress) SwaggerDoc() map[string]string { + return map_RouteIngress +} + +var map_RouteIngressCondition = map[string]string{ + "": "RouteIngressCondition contains details for the current condition of this route on a particular router.", + "type": "Type is the type of the condition. 
Currently only Ready.", + "status": "Status is the status of the condition. Can be True, False, Unknown.", + "reason": "(brief) reason for the condition's last transition, and is usually a machine and human readable constant", + "message": "Human readable message indicating details about last transition.", + "lastTransitionTime": "RFC 3339 date and time when this condition last transitioned", +} + +func (RouteIngressCondition) SwaggerDoc() map[string]string { + return map_RouteIngressCondition +} + +var map_RouteList = map[string]string{ + "": "RouteList is a collection of Routes.", + "items": "items is a list of routes", +} + +func (RouteList) SwaggerDoc() map[string]string { + return map_RouteList +} + +var map_RoutePort = map[string]string{ + "": "RoutePort defines a port mapping from a router to an endpoint in the service endpoints.", + "targetPort": "The target port on pods selected by the service this route points to. If this is a string, it will be looked up as a named port in the target endpoints port list. Required", +} + +func (RoutePort) SwaggerDoc() map[string]string { + return map_RoutePort +} + +var map_RouteSpec = map[string]string{ + "": "RouteSpec describes the hostname or path the route exposes, any security information, and one to four backends (services) the route points to. Requests are distributed among the backends depending on the weights assigned to each backend. When using roundrobin scheduling the portion of requests that go to each backend is the backend weight divided by the sum of all of the backend weights. When the backend has more than one endpoint the requests that end up on the backend are roundrobin distributed among the endpoints. Weights are between 0 and 256 with default 100. Weight 0 causes no requests to the backend. If all weights are zero the route will be considered to have no backends and return a standard 503 response.\n\nThe `tls` field is optional and allows specific certificates or behavior for the route. Routers typically configure a default certificate on a wildcard domain to terminate routes without explicit certificates, but custom hostnames usually must choose passthrough (send traffic directly to the backend via the TLS Server-Name- Indication field) or provide a certificate.", + "host": "host is an alias/DNS that points to the service. Optional. If not specified a route name will typically be automatically chosen. Must follow DNS952 subdomain conventions.", + "subdomain": "subdomain is a DNS subdomain that is requested within the ingress controller's domain (as a subdomain). If host is set this field is ignored. An ingress controller may choose to ignore this suggested name, in which case the controller will report the assigned name in the status.ingress array or refuse to admit the route. If this value is set and the server does not support this field host will be populated automatically. Otherwise host is left empty. The field may have multiple parts separated by a dot, but not all ingress controllers may honor the request. This field may not be changed after creation except by a user with the update routes/custom-host permission.\n\nExample: subdomain `frontend` automatically receives the router subdomain `apps.mycluster.com` to have a full hostname `frontend.apps.mycluster.com`.", + "path": "path that the router watches for, to route traffic for to the service. Optional", + "to": "to is an object the route should use as the primary backend. Only the Service kind is allowed, and it will be defaulted to Service. 
If the weight field (0-256 default 100) is set to zero, no traffic will be sent to this backend.", + "alternateBackends": "alternateBackends allows up to 3 additional backends to be assigned to the route. Only the Service kind is allowed, and it will be defaulted to Service. Use the weight field in RouteTargetReference object to specify relative preference.", + "port": "If specified, the port to be used by the router. Most routers will use all endpoints exposed by the service by default - set this value to instruct routers which port to use.", + "tls": "The tls field provides the ability to configure certificates and termination for the route.", + "wildcardPolicy": "Wildcard policy if any for the route. Currently only 'Subdomain' or 'None' is allowed.", +} + +func (RouteSpec) SwaggerDoc() map[string]string { + return map_RouteSpec +} + +var map_RouteStatus = map[string]string{ + "": "RouteStatus provides relevant info about the status of a route, including which routers acknowledge it.", + "ingress": "ingress describes the places where the route may be exposed. The list of ingress points may contain duplicate Host or RouterName values. Routes are considered live once they are `Ready`", +} + +func (RouteStatus) SwaggerDoc() map[string]string { + return map_RouteStatus +} + +var map_RouteTargetReference = map[string]string{ + "": "RouteTargetReference specifies the target that resolve into endpoints. Only the 'Service' kind is allowed. Use 'weight' field to emphasize one over others.", + "kind": "The kind of target that the route is referring to. Currently, only 'Service' is allowed", + "name": "name of the service/target that is being referred to. e.g. name of the service", + "weight": "weight as an integer between 0 and 256, default 100, that specifies the target's relative weight against other target reference objects. 0 suppresses requests to this backend.", +} + +func (RouteTargetReference) SwaggerDoc() map[string]string { + return map_RouteTargetReference +} + +var map_RouterShard = map[string]string{ + "": "RouterShard has information of a routing shard and is used to generate host names and routing table entries when a routing shard is allocated for a specific route. Caveat: This is WIP and will likely undergo modifications when sharding\n support is added.", + "shardName": "shardName uniquely identifies a router shard in the \"set\" of routers used for routing traffic to the services.", + "dnsSuffix": "dnsSuffix for the shard ala: shard-1.v3.openshift.com", +} + +func (RouterShard) SwaggerDoc() map[string]string { + return map_RouterShard +} + +var map_TLSConfig = map[string]string{ + "": "TLSConfig defines config used to secure a route and provide termination", + "termination": "termination indicates termination type.", + "certificate": "certificate provides certificate contents", + "key": "key provides key file contents", + "caCertificate": "caCertificate provides the cert authority certificate contents", + "destinationCACertificate": "destinationCACertificate provides the contents of the ca certificate of the final destination. When using reencrypt termination this file should be provided in order to have routers use it for health checks on the secure connection. 
If this field is not specified, the router may provide its own destination CA and perform hostname validation using the short service name (service.namespace.svc), which allows infrastructure generated certificates to automatically verify.",
+	"insecureEdgeTerminationPolicy": "insecureEdgeTerminationPolicy indicates the desired behavior for insecure connections to a route. While each router may make its own decisions on which ports to expose, this is normally port 80.\n\n* Allow - traffic is sent to the server on the insecure port (default) * Disable - no traffic is allowed on the insecure port. * Redirect - clients are redirected to the secure port.",
+}
+
+func (TLSConfig) SwaggerDoc() map[string]string {
+	return map_TLSConfig
+}
+
+// AUTO-GENERATED FUNCTIONS END HERE
diff --git a/vendor/github.com/openshift/api/samples/install.go b/vendor/github.com/openshift/api/samples/install.go
new file mode 100644
index 0000000000000000000000000000000000000000..8ad4d81978a8e7af67e5b5bf8a2be28f1414f12d
--- /dev/null
+++ b/vendor/github.com/openshift/api/samples/install.go
@@ -0,0 +1,26 @@
+package samples
+
+import (
+	"k8s.io/apimachinery/pkg/runtime"
+	"k8s.io/apimachinery/pkg/runtime/schema"
+
+	samplesv1 "github.com/openshift/api/samples/v1"
+)
+
+const (
+	GroupName = "samples.operator.openshift.io"
+)
+
+var (
+	schemeBuilder = runtime.NewSchemeBuilder(samplesv1.Install)
+	// Install is a function which adds every version of this group to a scheme
+	Install = schemeBuilder.AddToScheme
+)
+
+func Resource(resource string) schema.GroupResource {
+	return schema.GroupResource{Group: GroupName, Resource: resource}
+}
+
+func Kind(kind string) schema.GroupKind {
+	return schema.GroupKind{Group: GroupName, Kind: kind}
+}
diff --git a/vendor/github.com/openshift/api/samples/v1/0000_10_samplesconfig.crd.yaml b/vendor/github.com/openshift/api/samples/v1/0000_10_samplesconfig.crd.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..1318d4fb107b29bbaeafecfdafe45493cf12acf3
--- /dev/null
+++ b/vendor/github.com/openshift/api/samples/v1/0000_10_samplesconfig.crd.yaml
@@ -0,0 +1,175 @@
+apiVersion: apiextensions.k8s.io/v1beta1
+kind: CustomResourceDefinition
+metadata:
+  name: configs.samples.operator.openshift.io
+  annotations:
+    displayName: ConfigsSamples
+    description: Extension for configuring the openshift samples operator.
+spec:
+  scope: Cluster
+  subresources:
+    status: {}
+  preserveUnknownFields: false
+  group: samples.operator.openshift.io
+  versions:
+  - name: v1
+    served: true
+    storage: true
+  names:
+    plural: configs
+    singular: config
+    kind: Config
+    listKind: ConfigList
+  "validation":
+    "openAPIV3Schema":
+      description: Config contains the configuration and detailed condition status
+        for the Samples Operator.
+      type: object
+      required:
+      - metadata
+      - spec
+      properties:
+        apiVersion:
+          description: 'APIVersion defines the versioned schema of this representation
+            of an object. Servers should convert recognized schemas to the latest
+            internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources'
+          type: string
+        kind:
+          description: 'Kind is a string value representing the REST resource this
+            object represents. Servers may infer this from the endpoint the client
+            submits requests to. Cannot be updated. In CamelCase.
+            More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds'
+          type: string
+        metadata:
+          type: object
+        spec:
+          description: ConfigSpec contains the desired configuration and state for
+            the Samples Operator, controlling various behavior around the imagestreams
+            and templates it creates/updates in the openshift namespace.
+          type: object
+          properties:
+            architectures:
+              description: architectures determine which hardware architecture(s)
+                to install, where x86_64, ppc64le, and s390x are the only supported
+                choices currently.
+              type: array
+              items:
+                type: string
+            managementState:
+              description: managementState is top level on/off type of switch for
+                all operators. When "Managed", this operator processes config and
+                manipulates the samples accordingly. When "Unmanaged", this operator
+                ignores any updates to the resources it watches. When "Removed", it
+                reacts the same way as it does if the Config object is deleted, meaning
+                any ImageStreams or Templates it manages (i.e. it honors the skipped
+                lists) and the registry secret are deleted, along with the ConfigMap
+                in the operator's namespace that represents the last config used
+                to manipulate the samples.
+              type: string
+              pattern: ^(Managed|Unmanaged|Force|Removed)$
+            samplesRegistry:
+              description: samplesRegistry allows for the specification of which registry
+                is accessed by the ImageStreams for their image content. Defaults
+                to the content in https://github.com/openshift/library that is pulled
+                into this github repository, but based on our pulling only ocp content
+                it typically defaults to registry.redhat.io.
+              type: string
+            skippedImagestreams:
+              description: skippedImagestreams specifies names of image streams that
+                should NOT be created/updated. Admins can use this to allow them
+                to delete content they don’t want. They will still have to manually
+                delete the content but the operator will not recreate (or update)
+                anything listed here.
+              type: array
+              items:
+                type: string
+            skippedTemplates:
+              description: skippedTemplates specifies names of templates that should
+                NOT be created/updated. Admins can use this to allow them to delete
+                content they don’t want. They will still have to manually delete
+                the content but the operator will not recreate (or update) anything
+                listed here.
+              type: array
+              items:
+                type: string
+        status:
+          description: ConfigStatus contains the actual configuration in effect, as
+            well as various details that describe the state of the Samples Operator.
+          type: object
+          properties:
+            architectures:
+              description: architectures determine which hardware architecture(s)
+                to install, where x86_64 and ppc64le are the supported choices.
+              type: array
+              items:
+                type: string
+            conditions:
+              description: conditions represents the available maintenance status
+                of the sample imagestreams and templates.
+              type: array
+              items:
+                description: ConfigCondition captures various conditions of the Config
+                  as entries are processed.
+                type: object
+                required:
+                - status
+                - type
+                properties:
+                  lastTransitionTime:
+                    description: lastTransitionTime is the last time the condition
+                      transitioned from one status to another.
+                    type: string
+                    format: date-time
+                  lastUpdateTime:
+                    description: lastUpdateTime is the last time this condition was
+                      updated.
+                    type: string
+                    format: date-time
+                  message:
+                    description: message is a human readable message indicating details
+                      about the transition.
+                    type: string
+                  reason:
+                    description: reason is what caused the condition's last transition.
+                    type: string
+                  status:
+                    description: status of the condition, one of True, False, Unknown.
+                    type: string
+                  type:
+                    description: type of condition.
+                    type: string
+            managementState:
+              description: managementState reflects the current operational status
+                of the on/off switch for the operator. This operator compares the
+                ManagementState as part of determining that we are turning the operator
+                back on (i.e. "Managed") when it was previously "Unmanaged".
+              type: string
+              pattern: ^(Managed|Unmanaged|Force|Removed)$
+            samplesRegistry:
+              description: samplesRegistry allows for the specification of which registry
+                is accessed by the ImageStreams for their image content. Defaults
+                to the content in https://github.com/openshift/library that is pulled
+                into this github repository, but based on our pulling only ocp content
+                it typically defaults to registry.redhat.io.
+              type: string
+            skippedImagestreams:
+              description: skippedImagestreams specifies names of image streams that
+                should NOT be created/updated. Admins can use this to allow them
+                to delete content they don’t want. They will still have to manually
+                delete the content but the operator will not recreate (or update)
+                anything listed here.
+              type: array
+              items:
+                type: string
+            skippedTemplates:
+              description: skippedTemplates specifies names of templates that should
+                NOT be created/updated. Admins can use this to allow them to delete
+                content they don’t want. They will still have to manually delete
+                the content but the operator will not recreate (or update) anything
+                listed here.
+              type: array
+              items:
+                type: string
+            version:
+              description: version is the value of the operator's payload-based version
+                indicator when it was last successfully processed
+              type: string
diff --git a/vendor/github.com/openshift/api/samples/v1/doc.go b/vendor/github.com/openshift/api/samples/v1/doc.go
new file mode 100644
index 0000000000000000000000000000000000000000..d63c96b778cec5b7ba016ca9693fcac8239b9a59
--- /dev/null
+++ b/vendor/github.com/openshift/api/samples/v1/doc.go
@@ -0,0 +1,7 @@
+// +k8s:deepcopy-gen=package,register
+// +k8s:defaulter-gen=TypeMeta
+// +k8s:openapi-gen=true
+
+// +groupName=samples.operator.openshift.io
+// Package v1 is the v1 version of the API.
+package v1
diff --git a/vendor/github.com/openshift/api/samples/v1/generated.pb.go b/vendor/github.com/openshift/api/samples/v1/generated.pb.go
new file mode 100644
index 0000000000000000000000000000000000000000..5e365062b55cda3b695f15ab40d76619fceab186
--- /dev/null
+++ b/vendor/github.com/openshift/api/samples/v1/generated.pb.go
@@ -0,0 +1,1888 @@
+// Code generated by protoc-gen-gogo. DO NOT EDIT.
+// source: github.com/openshift/api/samples/v1/generated.proto
+
+package v1
+
+import (
+	fmt "fmt"
+
+	io "io"
+
+	proto "github.com/gogo/protobuf/proto"
+	github_com_openshift_api_operator_v1 "github.com/openshift/api/operator/v1"
+
+	k8s_io_api_core_v1 "k8s.io/api/core/v1"
+
+	math "math"
+	math_bits "math/bits"
+	reflect "reflect"
+	strings "strings"
+)
+
+// Reference imports to suppress errors if they are not otherwise used.
+var _ = proto.Marshal
+var _ = fmt.Errorf
+var _ = math.Inf
+
+// This is a compile-time assertion to ensure that this generated file
+// is compatible with the proto package it is being compiled against.
+// A compilation error at this line likely means your copy of the
+// proto package needs to be updated.
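
For orientation, a hypothetical sketch of constructing the samples Config that these marshalers serialize. Field names follow the CRD schema above; the ManagementState type coming from the operator/v1 package matches the import list here, and the "jenkins" entry is an arbitrary example.

package main

import (
	"fmt"

	operatorv1 "github.com/openshift/api/operator/v1"
	samplesv1 "github.com/openshift/api/samples/v1"
)

func main() {
	// A samples Config that keeps the operator Managed but skips one
	// imagestream, mirroring the spec properties in the CRD above.
	cfg := &samplesv1.Config{
		Spec: samplesv1.ConfigSpec{
			ManagementState:     operatorv1.Managed,
			SkippedImagestreams: []string{"jenkins"},
		},
	}
	fmt.Println(cfg.Spec.ManagementState, cfg.Spec.SkippedImagestreams)
}
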
+const _ = proto.GoGoProtoPackageIsVersion2 // please upgrade the proto package + +func (m *Config) Reset() { *m = Config{} } +func (*Config) ProtoMessage() {} +func (*Config) Descriptor() ([]byte, []int) { + return fileDescriptor_67d62912ac03ce1e, []int{0} +} +func (m *Config) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *Config) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *Config) XXX_Merge(src proto.Message) { + xxx_messageInfo_Config.Merge(m, src) +} +func (m *Config) XXX_Size() int { + return m.Size() +} +func (m *Config) XXX_DiscardUnknown() { + xxx_messageInfo_Config.DiscardUnknown(m) +} + +var xxx_messageInfo_Config proto.InternalMessageInfo + +func (m *ConfigCondition) Reset() { *m = ConfigCondition{} } +func (*ConfigCondition) ProtoMessage() {} +func (*ConfigCondition) Descriptor() ([]byte, []int) { + return fileDescriptor_67d62912ac03ce1e, []int{1} +} +func (m *ConfigCondition) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ConfigCondition) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *ConfigCondition) XXX_Merge(src proto.Message) { + xxx_messageInfo_ConfigCondition.Merge(m, src) +} +func (m *ConfigCondition) XXX_Size() int { + return m.Size() +} +func (m *ConfigCondition) XXX_DiscardUnknown() { + xxx_messageInfo_ConfigCondition.DiscardUnknown(m) +} + +var xxx_messageInfo_ConfigCondition proto.InternalMessageInfo + +func (m *ConfigList) Reset() { *m = ConfigList{} } +func (*ConfigList) ProtoMessage() {} +func (*ConfigList) Descriptor() ([]byte, []int) { + return fileDescriptor_67d62912ac03ce1e, []int{2} +} +func (m *ConfigList) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ConfigList) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *ConfigList) XXX_Merge(src proto.Message) { + xxx_messageInfo_ConfigList.Merge(m, src) +} +func (m *ConfigList) XXX_Size() int { + return m.Size() +} +func (m *ConfigList) XXX_DiscardUnknown() { + xxx_messageInfo_ConfigList.DiscardUnknown(m) +} + +var xxx_messageInfo_ConfigList proto.InternalMessageInfo + +func (m *ConfigSpec) Reset() { *m = ConfigSpec{} } +func (*ConfigSpec) ProtoMessage() {} +func (*ConfigSpec) Descriptor() ([]byte, []int) { + return fileDescriptor_67d62912ac03ce1e, []int{3} +} +func (m *ConfigSpec) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ConfigSpec) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *ConfigSpec) XXX_Merge(src proto.Message) { + xxx_messageInfo_ConfigSpec.Merge(m, src) +} +func (m *ConfigSpec) XXX_Size() int { + return m.Size() +} +func (m *ConfigSpec) XXX_DiscardUnknown() { + xxx_messageInfo_ConfigSpec.DiscardUnknown(m) +} + +var xxx_messageInfo_ConfigSpec proto.InternalMessageInfo + +func (m *ConfigStatus) Reset() { *m = ConfigStatus{} } +func (*ConfigStatus) ProtoMessage() {} +func (*ConfigStatus) Descriptor() ([]byte, []int) { + return fileDescriptor_67d62912ac03ce1e, []int{4} +} +func (m *ConfigStatus) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func 
(m *ConfigStatus) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *ConfigStatus) XXX_Merge(src proto.Message) { + xxx_messageInfo_ConfigStatus.Merge(m, src) +} +func (m *ConfigStatus) XXX_Size() int { + return m.Size() +} +func (m *ConfigStatus) XXX_DiscardUnknown() { + xxx_messageInfo_ConfigStatus.DiscardUnknown(m) +} + +var xxx_messageInfo_ConfigStatus proto.InternalMessageInfo + +func init() { + proto.RegisterType((*Config)(nil), "github.com.openshift.api.samples.v1.Config") + proto.RegisterType((*ConfigCondition)(nil), "github.com.openshift.api.samples.v1.ConfigCondition") + proto.RegisterType((*ConfigList)(nil), "github.com.openshift.api.samples.v1.ConfigList") + proto.RegisterType((*ConfigSpec)(nil), "github.com.openshift.api.samples.v1.ConfigSpec") + proto.RegisterType((*ConfigStatus)(nil), "github.com.openshift.api.samples.v1.ConfigStatus") +} + +func init() { + proto.RegisterFile("github.com/openshift/api/samples/v1/generated.proto", fileDescriptor_67d62912ac03ce1e) +} + +var fileDescriptor_67d62912ac03ce1e = []byte{ + // 801 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xc4, 0x54, 0xcb, 0x6e, 0xe4, 0x44, + 0x14, 0x6d, 0xa7, 0x1f, 0xc9, 0xd4, 0x4c, 0xd2, 0xa1, 0x82, 0x18, 0x2b, 0x0b, 0x7b, 0xd4, 0x23, + 0xa1, 0x00, 0xa2, 0x4c, 0x0f, 0x11, 0xc3, 0x92, 0xf1, 0xac, 0x22, 0x25, 0x1a, 0xa8, 0x34, 0x48, + 0x20, 0x16, 0x54, 0xdc, 0x37, 0xee, 0x9a, 0x8e, 0xed, 0x92, 0xab, 0xba, 0xa5, 0xde, 0xf1, 0x09, + 0xb3, 0xe4, 0x0f, 0xf8, 0x09, 0x3e, 0x20, 0x3b, 0x66, 0x39, 0x2b, 0x8b, 0x98, 0xbf, 0xc8, 0x0a, + 0x55, 0xd9, 0xee, 0xf7, 0x88, 0xb4, 0x82, 0xc4, 0x2e, 0x75, 0xee, 0x3d, 0xe7, 0x5e, 0xf7, 0x39, + 0xb9, 0xe8, 0xcb, 0x90, 0xab, 0xc1, 0xe8, 0x82, 0x04, 0x49, 0xe4, 0x25, 0x02, 0x62, 0x39, 0xe0, + 0x97, 0xca, 0x63, 0x82, 0x7b, 0x92, 0x45, 0xe2, 0x0a, 0xa4, 0x37, 0xee, 0x7a, 0x21, 0xc4, 0x90, + 0x32, 0x05, 0x7d, 0x22, 0xd2, 0x44, 0x25, 0xf8, 0xe9, 0x8c, 0x44, 0xa6, 0x24, 0xc2, 0x04, 0x27, + 0x25, 0x89, 0x8c, 0xbb, 0x87, 0x9f, 0xcf, 0x29, 0x87, 0x49, 0x98, 0x78, 0x86, 0x7b, 0x31, 0xba, + 0x34, 0x2f, 0xf3, 0x30, 0x7f, 0x15, 0x9a, 0x87, 0x9d, 0xe1, 0xd7, 0x92, 0xf0, 0xc4, 0x8c, 0x0e, + 0x92, 0x14, 0xd6, 0xcc, 0x3d, 0x3c, 0x9e, 0xf5, 0x44, 0x2c, 0x18, 0xf0, 0x18, 0xd2, 0x89, 0x27, + 0x86, 0xa1, 0x06, 0xa4, 0x17, 0x81, 0x62, 0xeb, 0x58, 0xde, 0xfb, 0x58, 0xe9, 0x28, 0x56, 0x3c, + 0x82, 0x15, 0xc2, 0x57, 0xff, 0x46, 0x90, 0xc1, 0x00, 0x22, 0xb6, 0xcc, 0xeb, 0xfc, 0xb6, 0x85, + 0x5a, 0x2f, 0x93, 0xf8, 0x92, 0x87, 0xf8, 0x17, 0xb4, 0xa3, 0xd7, 0xe9, 0x33, 0xc5, 0x6c, 0xeb, + 0x89, 0x75, 0xf4, 0xf0, 0xd9, 0x17, 0xa4, 0x50, 0x25, 0xf3, 0xaa, 0x44, 0x0c, 0x43, 0x0d, 0x48, + 0xa2, 0xbb, 0xc9, 0xb8, 0x4b, 0x5e, 0x5d, 0xbc, 0x86, 0x40, 0x9d, 0x81, 0x62, 0x3e, 0xbe, 0xce, + 0xdc, 0x5a, 0x9e, 0xb9, 0x68, 0x86, 0xd1, 0xa9, 0x2a, 0xfe, 0x0e, 0x35, 0xa4, 0x80, 0xc0, 0xde, + 0x32, 0xea, 0x1e, 0xb9, 0x83, 0x25, 0xa4, 0x58, 0xee, 0x5c, 0x40, 0xe0, 0x3f, 0x2a, 0xc5, 0x1b, + 0xfa, 0x45, 0x8d, 0x14, 0xfe, 0x11, 0xb5, 0xa4, 0x62, 0x6a, 0x24, 0xed, 0xba, 0x11, 0xed, 0x6e, + 0x22, 0x6a, 0x88, 0xfe, 0x5e, 0x29, 0xdb, 0x2a, 0xde, 0xb4, 0x14, 0xec, 0xfc, 0x59, 0x47, 0xed, + 0xa2, 0xf1, 0x65, 0x12, 0xf7, 0xb9, 0xe2, 0x49, 0x8c, 0x9f, 0xa3, 0x86, 0x9a, 0x08, 0x30, 0xbf, + 0xcf, 0x03, 0xff, 0x69, 0xb5, 0x50, 0x6f, 0x22, 0xe0, 0x36, 0x73, 0x0f, 0x96, 0xda, 0x35, 0x4c, + 0x0d, 0x01, 0x9f, 0x4e, 0xf7, 0xdc, 0x32, 0xd4, 0xe3, 0xc5, 0xa1, 
0xb7, 0x99, 0xbb, 0x26, 0x4c, + 0x64, 0xaa, 0xb4, 0xb8, 0x1a, 0x7e, 0x8d, 0xf6, 0xae, 0x98, 0x54, 0xdf, 0x8b, 0x3e, 0x53, 0xd0, + 0xe3, 0x11, 0x94, 0x5f, 0xff, 0xe9, 0xdd, 0x0c, 0xd3, 0x0c, 0xff, 0xa3, 0x72, 0x83, 0xbd, 0xd3, + 0x05, 0x25, 0xba, 0xa4, 0x8c, 0xc7, 0x08, 0x6b, 0xa4, 0x97, 0xb2, 0x58, 0x16, 0x5f, 0xa5, 0xe7, + 0x35, 0x36, 0x9e, 0x77, 0x58, 0xce, 0xc3, 0xa7, 0x2b, 0x6a, 0x74, 0xcd, 0x04, 0xfc, 0x31, 0x6a, + 0xa5, 0xc0, 0x64, 0x12, 0xdb, 0x4d, 0xf3, 0x8b, 0x4d, 0x6d, 0xa2, 0x06, 0xa5, 0x65, 0x15, 0x7f, + 0x82, 0xb6, 0x23, 0x90, 0x92, 0x85, 0x60, 0xb7, 0x4c, 0x63, 0xbb, 0x6c, 0xdc, 0x3e, 0x2b, 0x60, + 0x5a, 0xd5, 0x3b, 0x7f, 0x58, 0x08, 0x15, 0x16, 0x9d, 0x72, 0xa9, 0xf0, 0xcf, 0x2b, 0x81, 0x27, + 0x77, 0xfb, 0x1e, 0xcd, 0x36, 0x71, 0xdf, 0x2f, 0x47, 0xed, 0x54, 0xc8, 0x5c, 0xd8, 0xbf, 0x45, + 0x4d, 0xae, 0x20, 0xd2, 0x86, 0xd7, 0x8f, 0x1e, 0x3e, 0xfb, 0x6c, 0x83, 0x60, 0xfa, 0xbb, 0xa5, + 0x6e, 0xf3, 0x44, 0x2b, 0xd0, 0x42, 0xa8, 0xf3, 0xa6, 0x5e, 0xad, 0xaf, 0xff, 0x01, 0xf0, 0x04, + 0xb5, 0x23, 0x16, 0xb3, 0x10, 0x22, 0x88, 0x95, 0x0e, 0x48, 0x15, 0xcb, 0x57, 0x25, 0xbb, 0x7d, + 0xb6, 0x58, 0xbe, 0xcd, 0xdc, 0xe3, 0xf7, 0x9e, 0xce, 0x44, 0xe8, 0xbb, 0x90, 0xa4, 0x3a, 0x76, + 0x4b, 0x3c, 0xba, 0x3c, 0x07, 0xbf, 0x40, 0xed, 0x72, 0x69, 0x0a, 0x21, 0x97, 0x2a, 0x9d, 0x94, + 0xb1, 0x7e, 0x5c, 0x8d, 0x3e, 0x5f, 0x2c, 0xd3, 0xe5, 0x7e, 0xfc, 0x1c, 0xed, 0xb2, 0x34, 0x18, + 0x70, 0x05, 0x81, 0x1a, 0xa5, 0x20, 0xed, 0xc6, 0x93, 0xfa, 0xd1, 0x03, 0xff, 0x83, 0x3c, 0x73, + 0x77, 0x5f, 0xcc, 0x17, 0xe8, 0x62, 0x1f, 0x3e, 0x41, 0x07, 0x72, 0xc8, 0x85, 0x80, 0xfe, 0x49, + 0xc4, 0x42, 0x90, 0x2a, 0x05, 0x16, 0x49, 0xbb, 0x69, 0xe8, 0x8f, 0xf3, 0xcc, 0x3d, 0x38, 0x5f, + 0x2d, 0xd3, 0x75, 0x1c, 0xfc, 0x0d, 0xda, 0x2f, 0xe1, 0x1e, 0x44, 0xe2, 0x8a, 0x29, 0x90, 0x76, + 0xcb, 0xe8, 0x7c, 0x98, 0x67, 0xee, 0xfe, 0xf9, 0x52, 0x8d, 0xae, 0x74, 0x77, 0x7e, 0x6f, 0xa0, + 0x47, 0xf3, 0xc7, 0xe4, 0xff, 0x34, 0x65, 0x80, 0x50, 0x50, 0xdd, 0x8b, 0x2a, 0x75, 0xc7, 0x1b, + 0xa4, 0x6e, 0x7a, 0x6c, 0x66, 0x57, 0x7c, 0x0a, 0x49, 0x3a, 0xa7, 0xbd, 0xce, 0xfe, 0xfa, 0x7d, + 0xed, 0x6f, 0xde, 0xcf, 0xfe, 0xd6, 0x7f, 0x64, 0xff, 0xf6, 0x26, 0xf6, 0xeb, 0xdb, 0x33, 0x86, + 0x54, 0xf2, 0x24, 0xb6, 0x77, 0x16, 0x6f, 0xcf, 0x0f, 0x05, 0x4c, 0xab, 0xba, 0x7f, 0x74, 0x7d, + 0xe3, 0xd4, 0xde, 0xde, 0x38, 0xb5, 0x77, 0x37, 0x4e, 0xed, 0xd7, 0xdc, 0xb1, 0xae, 0x73, 0xc7, + 0x7a, 0x9b, 0x3b, 0xd6, 0xbb, 0xdc, 0xb1, 0xfe, 0xca, 0x1d, 0xeb, 0xcd, 0xdf, 0x4e, 0xed, 0xa7, + 0xad, 0x71, 0xf7, 0x9f, 0x00, 0x00, 0x00, 0xff, 0xff, 0x05, 0xb4, 0x66, 0xc9, 0xdf, 0x08, 0x00, + 0x00, +} + +func (m *Config) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Config) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *Config) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + { + size, err := m.Status.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a + { + size, err := m.Spec.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + { + size, err := m.ObjectMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = 
encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *ConfigCondition) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ConfigCondition) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ConfigCondition) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + i -= len(m.Message) + copy(dAtA[i:], m.Message) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Message))) + i-- + dAtA[i] = 0x32 + i -= len(m.Reason) + copy(dAtA[i:], m.Reason) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Reason))) + i-- + dAtA[i] = 0x2a + { + size, err := m.LastTransitionTime.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x22 + { + size, err := m.LastUpdateTime.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a + i -= len(m.Status) + copy(dAtA[i:], m.Status) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Status))) + i-- + dAtA[i] = 0x12 + i -= len(m.Type) + copy(dAtA[i:], m.Type) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Type))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *ConfigList) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ConfigList) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ConfigList) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.Items) > 0 { + for iNdEx := len(m.Items) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Items[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + } + { + size, err := m.ListMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *ConfigSpec) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ConfigSpec) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ConfigSpec) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.SkippedTemplates) > 0 { + for iNdEx := len(m.SkippedTemplates) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.SkippedTemplates[iNdEx]) + copy(dAtA[i:], m.SkippedTemplates[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.SkippedTemplates[iNdEx]))) + i-- + dAtA[i] = 0x32 + } + } + if len(m.SkippedImagestreams) > 0 { + for iNdEx := len(m.SkippedImagestreams) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.SkippedImagestreams[iNdEx]) + copy(dAtA[i:], m.SkippedImagestreams[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.SkippedImagestreams[iNdEx]))) + i-- + dAtA[i] = 
0x2a + } + } + if len(m.Architectures) > 0 { + for iNdEx := len(m.Architectures) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.Architectures[iNdEx]) + copy(dAtA[i:], m.Architectures[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Architectures[iNdEx]))) + i-- + dAtA[i] = 0x22 + } + } + i -= len(m.SamplesRegistry) + copy(dAtA[i:], m.SamplesRegistry) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.SamplesRegistry))) + i-- + dAtA[i] = 0x12 + i -= len(m.ManagementState) + copy(dAtA[i:], m.ManagementState) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.ManagementState))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *ConfigStatus) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ConfigStatus) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ConfigStatus) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + i -= len(m.Version) + copy(dAtA[i:], m.Version) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Version))) + i-- + dAtA[i] = 0x42 + if len(m.SkippedTemplates) > 0 { + for iNdEx := len(m.SkippedTemplates) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.SkippedTemplates[iNdEx]) + copy(dAtA[i:], m.SkippedTemplates[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.SkippedTemplates[iNdEx]))) + i-- + dAtA[i] = 0x3a + } + } + if len(m.SkippedImagestreams) > 0 { + for iNdEx := len(m.SkippedImagestreams) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.SkippedImagestreams[iNdEx]) + copy(dAtA[i:], m.SkippedImagestreams[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.SkippedImagestreams[iNdEx]))) + i-- + dAtA[i] = 0x32 + } + } + if len(m.Architectures) > 0 { + for iNdEx := len(m.Architectures) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.Architectures[iNdEx]) + copy(dAtA[i:], m.Architectures[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Architectures[iNdEx]))) + i-- + dAtA[i] = 0x2a + } + } + i -= len(m.SamplesRegistry) + copy(dAtA[i:], m.SamplesRegistry) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.SamplesRegistry))) + i-- + dAtA[i] = 0x1a + if len(m.Conditions) > 0 { + for iNdEx := len(m.Conditions) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Conditions[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + } + i -= len(m.ManagementState) + copy(dAtA[i:], m.ManagementState) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.ManagementState))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func encodeVarintGenerated(dAtA []byte, offset int, v uint64) int { + offset -= sovGenerated(v) + base := offset + for v >= 1<<7 { + dAtA[offset] = uint8(v&0x7f | 0x80) + v >>= 7 + offset++ + } + dAtA[offset] = uint8(v) + return base +} +func (m *Config) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.ObjectMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = m.Spec.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = m.Status.Size() + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *ConfigCondition) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Type) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Status) + n += 1 + l + sovGenerated(uint64(l)) + l = m.LastUpdateTime.Size() + n += 1 + l + 
sovGenerated(uint64(l)) + l = m.LastTransitionTime.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Reason) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Message) + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *ConfigList) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.ListMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + if len(m.Items) > 0 { + for _, e := range m.Items { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m *ConfigSpec) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.ManagementState) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.SamplesRegistry) + n += 1 + l + sovGenerated(uint64(l)) + if len(m.Architectures) > 0 { + for _, s := range m.Architectures { + l = len(s) + n += 1 + l + sovGenerated(uint64(l)) + } + } + if len(m.SkippedImagestreams) > 0 { + for _, s := range m.SkippedImagestreams { + l = len(s) + n += 1 + l + sovGenerated(uint64(l)) + } + } + if len(m.SkippedTemplates) > 0 { + for _, s := range m.SkippedTemplates { + l = len(s) + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m *ConfigStatus) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.ManagementState) + n += 1 + l + sovGenerated(uint64(l)) + if len(m.Conditions) > 0 { + for _, e := range m.Conditions { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + l = len(m.SamplesRegistry) + n += 1 + l + sovGenerated(uint64(l)) + if len(m.Architectures) > 0 { + for _, s := range m.Architectures { + l = len(s) + n += 1 + l + sovGenerated(uint64(l)) + } + } + if len(m.SkippedImagestreams) > 0 { + for _, s := range m.SkippedImagestreams { + l = len(s) + n += 1 + l + sovGenerated(uint64(l)) + } + } + if len(m.SkippedTemplates) > 0 { + for _, s := range m.SkippedTemplates { + l = len(s) + n += 1 + l + sovGenerated(uint64(l)) + } + } + l = len(m.Version) + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func sovGenerated(x uint64) (n int) { + return (math_bits.Len64(x|1) + 6) / 7 +} +func sozGenerated(x uint64) (n int) { + return sovGenerated(uint64((x << 1) ^ uint64((int64(x) >> 63)))) +} +func (this *Config) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&Config{`, + `ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v1.ObjectMeta", 1), `&`, ``, 1) + `,`, + `Spec:` + strings.Replace(strings.Replace(this.Spec.String(), "ConfigSpec", "ConfigSpec", 1), `&`, ``, 1) + `,`, + `Status:` + strings.Replace(strings.Replace(this.Status.String(), "ConfigStatus", "ConfigStatus", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *ConfigCondition) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&ConfigCondition{`, + `Type:` + fmt.Sprintf("%v", this.Type) + `,`, + `Status:` + fmt.Sprintf("%v", this.Status) + `,`, + `LastUpdateTime:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.LastUpdateTime), "Time", "v1.Time", 1), `&`, ``, 1) + `,`, + `LastTransitionTime:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.LastTransitionTime), "Time", "v1.Time", 1), `&`, ``, 1) + `,`, + `Reason:` + fmt.Sprintf("%v", this.Reason) + `,`, + `Message:` + fmt.Sprintf("%v", this.Message) + `,`, + `}`, + }, "") + return s +} +func (this *ConfigList) String() string { + if this == nil { + return "nil" + } + repeatedStringForItems := "[]Config{" + for _, f := range this.Items { + repeatedStringForItems 
+= strings.Replace(strings.Replace(f.String(), "Config", "Config", 1), `&`, ``, 1) + "," + } + repeatedStringForItems += "}" + s := strings.Join([]string{`&ConfigList{`, + `ListMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ListMeta), "ListMeta", "v1.ListMeta", 1), `&`, ``, 1) + `,`, + `Items:` + repeatedStringForItems + `,`, + `}`, + }, "") + return s +} +func (this *ConfigSpec) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&ConfigSpec{`, + `ManagementState:` + fmt.Sprintf("%v", this.ManagementState) + `,`, + `SamplesRegistry:` + fmt.Sprintf("%v", this.SamplesRegistry) + `,`, + `Architectures:` + fmt.Sprintf("%v", this.Architectures) + `,`, + `SkippedImagestreams:` + fmt.Sprintf("%v", this.SkippedImagestreams) + `,`, + `SkippedTemplates:` + fmt.Sprintf("%v", this.SkippedTemplates) + `,`, + `}`, + }, "") + return s +} +func (this *ConfigStatus) String() string { + if this == nil { + return "nil" + } + repeatedStringForConditions := "[]ConfigCondition{" + for _, f := range this.Conditions { + repeatedStringForConditions += strings.Replace(strings.Replace(f.String(), "ConfigCondition", "ConfigCondition", 1), `&`, ``, 1) + "," + } + repeatedStringForConditions += "}" + s := strings.Join([]string{`&ConfigStatus{`, + `ManagementState:` + fmt.Sprintf("%v", this.ManagementState) + `,`, + `Conditions:` + repeatedStringForConditions + `,`, + `SamplesRegistry:` + fmt.Sprintf("%v", this.SamplesRegistry) + `,`, + `Architectures:` + fmt.Sprintf("%v", this.Architectures) + `,`, + `SkippedImagestreams:` + fmt.Sprintf("%v", this.SkippedImagestreams) + `,`, + `SkippedTemplates:` + fmt.Sprintf("%v", this.SkippedTemplates) + `,`, + `Version:` + fmt.Sprintf("%v", this.Version) + `,`, + `}`, + }, "") + return s +} +func valueToStringGenerated(v interface{}) string { + rv := reflect.ValueOf(v) + if rv.IsNil() { + return "nil" + } + pv := reflect.Indirect(rv).Interface() + return fmt.Sprintf("*%v", pv) +} +func (m *Config) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Config: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Config: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ObjectMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Spec", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= 
l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Spec.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Status", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Status.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ConfigCondition) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ConfigCondition: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ConfigCondition: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Type", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Type = ConfigConditionType(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Status", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Status = 
k8s_io_api_core_v1.ConditionStatus(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field LastUpdateTime", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.LastUpdateTime.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field LastTransitionTime", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.LastTransitionTime.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Reason", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Reason = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 6: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Message", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Message = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ConfigList) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + 
wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ConfigList: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ConfigList: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ListMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ListMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Items = append(m.Items, Config{}) + if err := m.Items[len(m.Items)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ConfigSpec) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ConfigSpec: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ConfigSpec: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ManagementState", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ManagementState = 
github_com_openshift_api_operator_v1.ManagementState(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field SamplesRegistry", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.SamplesRegistry = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Architectures", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Architectures = append(m.Architectures, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field SkippedImagestreams", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.SkippedImagestreams = append(m.SkippedImagestreams, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + case 6: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field SkippedTemplates", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.SkippedTemplates = append(m.SkippedTemplates, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ConfigStatus) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for 
shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ConfigStatus: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ConfigStatus: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ManagementState", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ManagementState = github_com_openshift_api_operator_v1.ManagementState(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Conditions", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Conditions = append(m.Conditions, ConfigCondition{}) + if err := m.Conditions[len(m.Conditions)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field SamplesRegistry", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.SamplesRegistry = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Architectures", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Architectures = append(m.Architectures, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + case 6: + if wireType != 2 { + return fmt.Errorf("proto: wrong 
wireType = %d for field SkippedImagestreams", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.SkippedImagestreams = append(m.SkippedImagestreams, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + case 7: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field SkippedTemplates", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.SkippedTemplates = append(m.SkippedTemplates, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + case 8: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Version", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Version = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func skipGenerated(dAtA []byte) (n int, err error) { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowGenerated + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + wireType := int(wire & 0x7) + switch wireType { + case 0: + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowGenerated + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + iNdEx++ + if dAtA[iNdEx-1] < 0x80 { + break + } + } + return iNdEx, nil + case 1: + iNdEx += 8 + return iNdEx, nil + case 2: + var length int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowGenerated + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + length |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if length < 0 { + return 0, ErrInvalidLengthGenerated + } + iNdEx += 
length
+			if iNdEx < 0 {
+				return 0, ErrInvalidLengthGenerated
+			}
+			return iNdEx, nil
+		case 3:
+			for {
+				var innerWire uint64
+				var start int = iNdEx
+				for shift := uint(0); ; shift += 7 {
+					if shift >= 64 {
+						return 0, ErrIntOverflowGenerated
+					}
+					if iNdEx >= l {
+						return 0, io.ErrUnexpectedEOF
+					}
+					b := dAtA[iNdEx]
+					iNdEx++
+					innerWire |= (uint64(b) & 0x7F) << shift
+					if b < 0x80 {
+						break
+					}
+				}
+				innerWireType := int(innerWire & 0x7)
+				if innerWireType == 4 {
+					break
+				}
+				next, err := skipGenerated(dAtA[start:])
+				if err != nil {
+					return 0, err
+				}
+				iNdEx = start + next
+				if iNdEx < 0 {
+					return 0, ErrInvalidLengthGenerated
+				}
+			}
+			return iNdEx, nil
+		case 4:
+			return iNdEx, nil
+		case 5:
+			iNdEx += 4
+			return iNdEx, nil
+		default:
+			return 0, fmt.Errorf("proto: illegal wireType %d", wireType)
+		}
+	}
+	panic("unreachable")
+}
+
+var (
+	ErrInvalidLengthGenerated = fmt.Errorf("proto: negative length found during unmarshaling")
+	ErrIntOverflowGenerated   = fmt.Errorf("proto: integer overflow")
+)
diff --git a/vendor/github.com/openshift/api/samples/v1/generated.proto b/vendor/github.com/openshift/api/samples/v1/generated.proto
new file mode 100644
index 0000000000000000000000000000000000000000..47f5b93c1d78a09048c46d6c44557282b32b3950
--- /dev/null
+++ b/vendor/github.com/openshift/api/samples/v1/generated.proto
@@ -0,0 +1,147 @@
+
+// This file was autogenerated by go-to-protobuf. Do not edit it manually!
+
+syntax = 'proto2';
+
+package github.com.openshift.api.samples.v1;
+
+import "k8s.io/api/core/v1/generated.proto";
+import "k8s.io/apimachinery/pkg/apis/meta/v1/generated.proto";
+import "k8s.io/apimachinery/pkg/runtime/generated.proto";
+import "k8s.io/apimachinery/pkg/runtime/schema/generated.proto";
+
+// Package-wide variables from generator "generated".
+option go_package = "v1";
+
+// Config contains the configuration and detailed condition status for the Samples Operator.
+message Config {
+  optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1;
+
+  // +kubebuilder:validation:Required
+  // +required
+  optional ConfigSpec spec = 2;
+
+  // +optional
+  optional ConfigStatus status = 3;
+}
+
+// ConfigCondition captures various conditions of the Config
+// as entries are processed.
+message ConfigCondition {
+  // type of condition.
+  optional string type = 1;
+
+  // status of the condition, one of True, False, Unknown.
+  optional string status = 2;
+
+  // lastUpdateTime is the last time this condition was updated.
+  optional k8s.io.apimachinery.pkg.apis.meta.v1.Time lastUpdateTime = 3;
+
+  // lastTransitionTime is the last time the condition transitioned from one status to another.
+  optional k8s.io.apimachinery.pkg.apis.meta.v1.Time lastTransitionTime = 4;
+
+  // reason is what caused the condition's last transition.
+  optional string reason = 5;
+
+  // message is a human readable message indicating details about the transition.
+  optional string message = 6;
+}
+
+message ConfigList {
+  optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1;
+
+  repeated Config items = 2;
+}
+
+// ConfigSpec contains the desired configuration and state for the Samples Operator, controlling
+// various behavior around the imagestreams and templates it creates/updates in the
+// openshift namespace.
+message ConfigSpec {
+  // managementState is top level on/off type of switch for all operators.
+  // When "Managed", this operator processes config and manipulates the samples accordingly.
+ // When "Unmanaged", this operator ignores any updates to the resources it watches. + // When "Removed", it reacts that same wasy as it does if the Config object + // is deleted, meaning any ImageStreams or Templates it manages (i.e. it honors the skipped + // lists) and the registry secret are deleted, along with the ConfigMap in the operator's + // namespace that represents the last config used to manipulate the samples, + optional string managementState = 1; + + // samplesRegistry allows for the specification of which registry is accessed + // by the ImageStreams for their image content. Defaults on the content in https://github.com/openshift/library + // that are pulled into this github repository, but based on our pulling only ocp content it typically + // defaults to registry.redhat.io. + optional string samplesRegistry = 2; + + // architectures determine which hardware architecture(s) to install, where x86_64, ppc64le, and s390x are the only + // supported choices currently. + repeated string architectures = 4; + + // skippedImagestreams specifies names of image streams that should NOT be + // created/updated. Admins can use this to allow them to delete content + // they don’t want. They will still have to manually delete the + // content but the operator will not recreate(or update) anything + // listed here. + repeated string skippedImagestreams = 5; + + // skippedTemplates specifies names of templates that should NOT be + // created/updated. Admins can use this to allow them to delete content + // they don’t want. They will still have to manually delete the + // content but the operator will not recreate(or update) anything + // listed here. + repeated string skippedTemplates = 6; +} + +// ConfigStatus contains the actual configuration in effect, as well as various details +// that describe the state of the Samples Operator. +message ConfigStatus { + // managementState reflects the current operational status of the on/off switch for + // the operator. This operator compares the ManagementState as part of determining that we are turning + // the operator back on (i.e. "Managed") when it was previously "Unmanaged". + // +patchMergeKey=type + // +patchStrategy=merge + optional string managementState = 1; + + // conditions represents the available maintenance status of the sample + // imagestreams and templates. + // +patchMergeKey=type + // +patchStrategy=merge + repeated ConfigCondition conditions = 2; + + // samplesRegistry allows for the specification of which registry is accessed + // by the ImageStreams for their image content. Defaults on the content in https://github.com/openshift/library + // that are pulled into this github repository, but based on our pulling only ocp content it typically + // defaults to registry.redhat.io. + // +patchMergeKey=type + // +patchStrategy=merge + optional string samplesRegistry = 3; + + // architectures determine which hardware architecture(s) to install, where x86_64 and ppc64le are the + // supported choices. + // +patchMergeKey=type + // +patchStrategy=merge + repeated string architectures = 5; + + // skippedImagestreams specifies names of image streams that should NOT be + // created/updated. Admins can use this to allow them to delete content + // they don’t want. They will still have to manually delete the + // content but the operator will not recreate(or update) anything + // listed here. 
+  // +patchMergeKey=type
+  // +patchStrategy=merge
+  repeated string skippedImagestreams = 6;
+
+  // skippedTemplates specifies names of templates that should NOT be
+  // created/updated. Admins can use this to allow them to delete content
+  // they don’t want. They will still have to manually delete the
+  // content but the operator will not recreate (or update) anything
+  // listed here.
+  // +patchMergeKey=type
+  // +patchStrategy=merge
+  repeated string skippedTemplates = 7;
+
+  // version is the value of the operator's payload-based version indicator when it was last successfully processed.
+  // +patchMergeKey=type
+  // +patchStrategy=merge
+  optional string version = 8;
+}
+
diff --git a/vendor/github.com/openshift/api/samples/v1/register.go b/vendor/github.com/openshift/api/samples/v1/register.go
new file mode 100644
index 0000000000000000000000000000000000000000..b861cf789183ea15d99a8801d590dc9b5315bbb1
--- /dev/null
+++ b/vendor/github.com/openshift/api/samples/v1/register.go
@@ -0,0 +1,46 @@
+package v1
+
+import (
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	"k8s.io/apimachinery/pkg/runtime"
+	"k8s.io/apimachinery/pkg/runtime/schema"
+)
+
+const (
+	Version   = "v1"
+	GroupName = "samples.operator.openshift.io"
+)
+
+var (
+	scheme        = runtime.NewScheme()
+	SchemeBuilder = runtime.NewSchemeBuilder(addKnownTypes)
+	AddToScheme   = SchemeBuilder.AddToScheme
+	// SchemeGroupVersion is the group version used to register these objects.
+	SchemeGroupVersion = schema.GroupVersion{Group: GroupName, Version: Version}
+	// Install is a function which adds this version to a scheme
+	Install = SchemeBuilder.AddToScheme
+)
+
+func init() {
+	AddToScheme(scheme)
+}
+
+// addKnownTypes adds the set of types defined in this package to the supplied scheme.
+func addKnownTypes(scheme *runtime.Scheme) error {
+	scheme.AddKnownTypes(SchemeGroupVersion,
+		&Config{},
+		&ConfigList{},
+	)
+	metav1.AddToGroupVersion(scheme, SchemeGroupVersion)
+	return nil
+}
+
+// Kind takes an unqualified kind and returns back a Group qualified GroupKind
+func Kind(kind string) schema.GroupKind {
+	return SchemeGroupVersion.WithKind(kind).GroupKind()
+}
+
+// Resource takes an unqualified resource and returns a Group qualified GroupResource
+func Resource(resource string) schema.GroupResource {
+	return SchemeGroupVersion.WithResource(resource).GroupResource()
+}
diff --git a/vendor/github.com/openshift/api/samples/v1/types_config.go b/vendor/github.com/openshift/api/samples/v1/types_config.go
new file mode 100644
index 0000000000000000000000000000000000000000..01d932d5bc0ddaeb5c2c15f4373bda8ef2056ac9
--- /dev/null
+++ b/vendor/github.com/openshift/api/samples/v1/types_config.go
@@ -0,0 +1,226 @@
+package v1
+
+import (
+	operatorv1 "github.com/openshift/api/operator/v1"
+	corev1 "k8s.io/api/core/v1"
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+)
+
+// +genclient
+// +genclient:nonNamespaced
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+
+// Config contains the configuration and detailed condition status for the Samples Operator.
+type Config struct {
+	metav1.TypeMeta   `json:",inline"`
+	metav1.ObjectMeta `json:"metadata" protobuf:"bytes,1,opt,name=metadata"`
+
+	// +kubebuilder:validation:Required
+	// +required
+	Spec ConfigSpec `json:"spec" protobuf:"bytes,2,opt,name=spec"`
+	// +optional
+	Status ConfigStatus `json:"status,omitempty" protobuf:"bytes,3,opt,name=status"`
+}
+
+// ConfigSpec contains the desired configuration and state for the Samples Operator, controlling
+// various behavior around the imagestreams and templates it creates/updates in the
+// openshift namespace.
+type ConfigSpec struct {
+	// managementState is top level on/off type of switch for all operators.
+	// When "Managed", this operator processes config and manipulates the samples accordingly.
+	// When "Unmanaged", this operator ignores any updates to the resources it watches.
+	// When "Removed", it reacts the same way as it does if the Config object
+	// is deleted, meaning any ImageStreams or Templates it manages (i.e. it honors the skipped
+	// lists) and the registry secret are deleted, along with the ConfigMap in the operator's
+	// namespace that represents the last config used to manipulate the samples.
+	ManagementState operatorv1.ManagementState `json:"managementState,omitempty" protobuf:"bytes,1,opt,name=managementState"`
+
+	// samplesRegistry allows for the specification of which registry is accessed
+	// by the ImageStreams for their image content. Defaults to the content in https://github.com/openshift/library
+	// that is pulled into this github repository, but since only ocp content is pulled, it typically
+	// defaults to registry.redhat.io.
+	SamplesRegistry string `json:"samplesRegistry,omitempty" protobuf:"bytes,2,opt,name=samplesRegistry"`
+
+	// architectures determine which hardware architecture(s) to install, where x86_64, ppc64le, and s390x are the only
+	// supported choices currently.
+	Architectures []string `json:"architectures,omitempty" protobuf:"bytes,4,opt,name=architectures"`
+
+	// skippedImagestreams specifies names of image streams that should NOT be
+	// created/updated. Admins can use this to allow them to delete content
+	// they don’t want. They will still have to manually delete the
+	// content but the operator will not recreate (or update) anything
+	// listed here.
+	SkippedImagestreams []string `json:"skippedImagestreams,omitempty" protobuf:"bytes,5,opt,name=skippedImagestreams"`
+
+	// skippedTemplates specifies names of templates that should NOT be
+	// created/updated. Admins can use this to allow them to delete content
+	// they don’t want. They will still have to manually delete the
+	// content but the operator will not recreate (or update) anything
+	// listed here.
+	SkippedTemplates []string `json:"skippedTemplates,omitempty" protobuf:"bytes,6,opt,name=skippedTemplates"`
+}
+
+// ConfigStatus contains the actual configuration in effect, as well as various details
+// that describe the state of the Samples Operator.
+type ConfigStatus struct {
+	// managementState reflects the current operational status of the on/off switch for
+	// the operator. This operator compares the ManagementState as part of determining that we are turning
+	// the operator back on (i.e. "Managed") when it was previously "Unmanaged".
+	// +patchMergeKey=type
+	// +patchStrategy=merge
+	ManagementState operatorv1.ManagementState `json:"managementState,omitempty" patchStrategy:"merge" patchMergeKey:"type" protobuf:"bytes,1,rep,name=managementState"`
+	// conditions represents the available maintenance status of the sample
+	// imagestreams and templates.
+	// +patchMergeKey=type
+	// +patchStrategy=merge
+	Conditions []ConfigCondition `json:"conditions,omitempty" patchStrategy:"merge" patchMergeKey:"type" protobuf:"bytes,2,rep,name=conditions"`
+
+	// samplesRegistry allows for the specification of which registry is accessed
+	// by the ImageStreams for their image content. Defaults to the content in https://github.com/openshift/library
+	// that is pulled into this github repository, but since only ocp content is pulled, it typically
+	// defaults to registry.redhat.io.
+	// +patchMergeKey=type
+	// +patchStrategy=merge
+	SamplesRegistry string `json:"samplesRegistry,omitempty" patchStrategy:"merge" patchMergeKey:"type" protobuf:"bytes,3,rep,name=samplesRegistry"`
+
+	// architectures determine which hardware architecture(s) to install, where x86_64 and ppc64le are the
+	// supported choices.
+	// +patchMergeKey=type
+	// +patchStrategy=merge
+	Architectures []string `json:"architectures,omitempty" patchStrategy:"merge" patchMergeKey:"type" protobuf:"bytes,5,rep,name=architectures"`
+
+	// skippedImagestreams specifies names of image streams that should NOT be
+	// created/updated. Admins can use this to allow them to delete content
+	// they don’t want. They will still have to manually delete the
+	// content but the operator will not recreate (or update) anything
+	// listed here.
+	// +patchMergeKey=type
+	// +patchStrategy=merge
+	SkippedImagestreams []string `json:"skippedImagestreams,omitempty" patchStrategy:"merge" patchMergeKey:"type" protobuf:"bytes,6,rep,name=skippedImagestreams"`
+
+	// skippedTemplates specifies names of templates that should NOT be
+	// created/updated. Admins can use this to allow them to delete content
+	// they don’t want. They will still have to manually delete the
+	// content but the operator will not recreate (or update) anything
+	// listed here.
+	// +patchMergeKey=type
+	// +patchStrategy=merge
+	SkippedTemplates []string `json:"skippedTemplates,omitempty" patchStrategy:"merge" patchMergeKey:"type" protobuf:"bytes,7,rep,name=skippedTemplates"`
+
+	// version is the value of the operator's payload-based version indicator when it was last successfully processed.
+	// +patchMergeKey=type
+	// +patchStrategy=merge
+	Version string `json:"version,omitempty" patchStrategy:"merge" patchMergeKey:"type" protobuf:"bytes,8,rep,name=version"`
+}
+
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+
+type ConfigList struct {
+	metav1.TypeMeta `json:",inline"`
+	metav1.ListMeta `json:"metadata" protobuf:"bytes,1,opt,name=metadata"`
+	Items []Config `json:"items" protobuf:"bytes,2,rep,name=items"`
+}
+
+const (
+	// SamplesRegistryCredentials is the name for a secret that contains a username+password/token
+	// for the registry, which, if present, will be used for authentication.
+	// The corresponding secret is required to already be formatted as a
+	// dockerconfig secret so that it can just be copied
+	// to the openshift namespace
+	// for use during imagestream import.
+	SamplesRegistryCredentials = "samples-registry-credentials"
+	// ConfigName is the name/identifier of the static, singleton operator employed for the samples.
+ ConfigName = "cluster" + // X86Architecture is the value used to specify the x86_64 hardware architecture + // in the Architectures array field. + X86Architecture = "x86_64" + // AMDArchitecture is the golang value for x86 64 bit hardware architecture; for the purposes + // of this operator, it is equivalent to X86Architecture, which is kept for historical/migration + // purposes + AMDArchitecture = "amd64" + // PPCArchitecture is the value used to specify the x86_64 hardware architecture + // in the Architectures array field. + PPCArchitecture = "ppc64le" + // S390Architecture is the value used to specify the s390x hardware architecture + // in the Architecture array field. + S390Architecture = "s390x" + // ConfigFinalizer is the text added to the Config.Finalizer field + // to enable finalizer processing. + ConfigFinalizer = GroupName + "/finalizer" + // SamplesManagedLabel is the key for a label added to all the imagestreams and templates + // in the openshift namespace that the Config is managing. This label is adjusted + // when changes to the SkippedImagestreams and SkippedTemplates fields are made. + SamplesManagedLabel = GroupName + "/managed" + // SamplesVersionAnnotation is the key for an annotation set on the imagestreams, templates, + // and secret that this operator manages that signifies the version of the operator that + // last managed the particular resource. + SamplesVersionAnnotation = GroupName + "/version" + // SamplesRecreateCredentialAnnotation is the key for an annotation set on the secret used + // for authentication when configuration moves from Removed to Managed but the associated secret + // in the openshift namespace does not exist. This will initiate creation of the credential + // in the openshift namespace. + SamplesRecreateCredentialAnnotation = GroupName + "/recreate" + // OperatorNamespace is the namespace the operator runs in. + OperatorNamespace = "openshift-cluster-samples-operator" +) + +type ConfigConditionType string + +// the valid conditions of the Config + +const ( + // ImportCredentialsExist represents the state of any credentials specified by + // the SamplesRegistry field in the Spec. + ImportCredentialsExist ConfigConditionType = "ImportCredentialsExist" + // SamplesExist represents whether an incoming Config has been successfully + // processed or not all, or whether the last Config to come in has been + // successfully processed. + SamplesExist ConfigConditionType = "SamplesExist" + // ConfigurationValid represents whether the latest Config to come in + // tried to make a support configuration change. Currently, changes to the + // InstallType and Architectures list after initial processing is not allowed. + ConfigurationValid ConfigConditionType = "ConfigurationValid" + // ImageChangesInProgress represents the state between where the samples operator has + // started updating the imagestreams and when the spec and status generations for each + // tag match. The list of imagestreams that are still in progress will be stored + // in the Reason field of the condition. The Reason field being empty corresponds + // with this condition being marked true. + ImageChangesInProgress ConfigConditionType = "ImageChangesInProgress" + // RemovePending represents whether the Config Spec ManagementState + // has been set to Removed, but we have not completed the deletion of the + // samples, pull secret, etc. and set the Config Spec ManagementState to Removed. 
+	// Also note, while a samples creation/update cycle is still in progress, and ImageChangesInProgress
+	// is True, the operator will not initiate the deletions, as we
+	// do not want the create/updates and deletes of the samples to be occurring in parallel.
+	// So the actual Removed processing will be initiated only after ImageChangesInProgress is set
+	// to false. Once the deletions are done, and the Status ManagementState is Removed, this
+	// condition is set back to False. Lastly, when this condition is set to True, the
+	// ClusterOperator Progressing condition will be set to True.
+	RemovePending ConfigConditionType = "RemovePending"
+	// MigrationInProgress represents the special case where the operator is running off of
+	// a new version of its image, and samples are deployed from a previous version. This condition
+	// facilitates the maintenance of this operator's ClusterOperator object.
+	MigrationInProgress ConfigConditionType = "MigrationInProgress"
+	// ImportImageErrorsExist registers any image import failures, separate from ImageChangesInProgress,
+	// so that we can a) indicate a problem to the ClusterOperator status, b) mark the current
+	// change cycle as complete in both ClusterOperator and Config; retry on import will
+	// occur by the next relist interval if it was an intermittent issue.
+	ImportImageErrorsExist ConfigConditionType = "ImportImageErrorsExist"
+)
+
+// ConfigCondition captures various conditions of the Config
+// as entries are processed.
+type ConfigCondition struct {
+	// type of condition.
+	Type ConfigConditionType `json:"type" protobuf:"bytes,1,opt,name=type,casttype=ConfigConditionType"`
+	// status of the condition, one of True, False, Unknown.
+	Status corev1.ConditionStatus `json:"status" protobuf:"bytes,2,opt,name=status,casttype=k8s.io/kubernetes/pkg/api/v1.ConditionStatus"`
+	// lastUpdateTime is the last time this condition was updated.
+	LastUpdateTime metav1.Time `json:"lastUpdateTime,omitempty" protobuf:"bytes,3,opt,name=lastUpdateTime"`
+	// lastTransitionTime is the last time the condition transitioned from one status to another.
+	LastTransitionTime metav1.Time `json:"lastTransitionTime,omitempty" protobuf:"bytes,4,opt,name=lastTransitionTime"`
+	// reason is what caused the condition's last transition.
+	Reason string `json:"reason,omitempty" protobuf:"bytes,5,opt,name=reason"`
+	// message is a human readable message indicating details about the transition.
+	Message string `json:"message,omitempty" protobuf:"bytes,6,opt,name=message"`
+}
diff --git a/vendor/github.com/openshift/api/samples/v1/zz_generated.deepcopy.go b/vendor/github.com/openshift/api/samples/v1/zz_generated.deepcopy.go
new file mode 100644
index 0000000000000000000000000000000000000000..ccac05615170386e7af4426993d874440e3e04e4
--- /dev/null
+++ b/vendor/github.com/openshift/api/samples/v1/zz_generated.deepcopy.go
@@ -0,0 +1,157 @@
+// +build !ignore_autogenerated
+
+// Code generated by deepcopy-gen. DO NOT EDIT.
+
+package v1
+
+import (
+	runtime "k8s.io/apimachinery/pkg/runtime"
+)
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *Config) DeepCopyInto(out *Config) {
+	*out = *in
+	out.TypeMeta = in.TypeMeta
+	in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
+	in.Spec.DeepCopyInto(&out.Spec)
+	in.Status.DeepCopyInto(&out.Status)
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Config.
+func (in *Config) DeepCopy() *Config { + if in == nil { + return nil + } + out := new(Config) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *Config) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ConfigCondition) DeepCopyInto(out *ConfigCondition) { + *out = *in + in.LastUpdateTime.DeepCopyInto(&out.LastUpdateTime) + in.LastTransitionTime.DeepCopyInto(&out.LastTransitionTime) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ConfigCondition. +func (in *ConfigCondition) DeepCopy() *ConfigCondition { + if in == nil { + return nil + } + out := new(ConfigCondition) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ConfigList) DeepCopyInto(out *ConfigList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]Config, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ConfigList. +func (in *ConfigList) DeepCopy() *ConfigList { + if in == nil { + return nil + } + out := new(ConfigList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *ConfigList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ConfigSpec) DeepCopyInto(out *ConfigSpec) { + *out = *in + if in.Architectures != nil { + in, out := &in.Architectures, &out.Architectures + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.SkippedImagestreams != nil { + in, out := &in.SkippedImagestreams, &out.SkippedImagestreams + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.SkippedTemplates != nil { + in, out := &in.SkippedTemplates, &out.SkippedTemplates + *out = make([]string, len(*in)) + copy(*out, *in) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ConfigSpec. +func (in *ConfigSpec) DeepCopy() *ConfigSpec { + if in == nil { + return nil + } + out := new(ConfigSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
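
A practical note on the generated deepcopy code: objects served from client-go informer caches are shared, so a controller mutates a DeepCopy rather than the cached object. A short sketch under that assumption (the helper is illustrative):

package example

import samplesv1 "github.com/openshift/api/samples/v1"

// withSkippedTemplate returns a mutated deep copy of cfg, leaving the original
// (for example, an object obtained from a lister) untouched. Illustrative helper.
func withSkippedTemplate(cfg *samplesv1.Config, name string) *samplesv1.Config {
	out := cfg.DeepCopy()
	out.Spec.SkippedTemplates = append(out.Spec.SkippedTemplates, name)
	return out
}
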
+func (in *ConfigStatus) DeepCopyInto(out *ConfigStatus) { + *out = *in + if in.Conditions != nil { + in, out := &in.Conditions, &out.Conditions + *out = make([]ConfigCondition, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Architectures != nil { + in, out := &in.Architectures, &out.Architectures + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.SkippedImagestreams != nil { + in, out := &in.SkippedImagestreams, &out.SkippedImagestreams + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.SkippedTemplates != nil { + in, out := &in.SkippedTemplates, &out.SkippedTemplates + *out = make([]string, len(*in)) + copy(*out, *in) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ConfigStatus. +func (in *ConfigStatus) DeepCopy() *ConfigStatus { + if in == nil { + return nil + } + out := new(ConfigStatus) + in.DeepCopyInto(out) + return out +} diff --git a/vendor/github.com/openshift/api/samples/v1/zz_generated.swagger_doc_generated.go b/vendor/github.com/openshift/api/samples/v1/zz_generated.swagger_doc_generated.go new file mode 100644 index 0000000000000000000000000000000000000000..7fb0dd073f4924e1a2375d6be75482ff1429e2dc --- /dev/null +++ b/vendor/github.com/openshift/api/samples/v1/zz_generated.swagger_doc_generated.go @@ -0,0 +1,64 @@ +package v1 + +// This file contains a collection of methods that can be used from go-restful to +// generate Swagger API documentation for its models. Please read this PR for more +// information on the implementation: https://github.com/emicklei/go-restful/pull/215 +// +// TODOs are ignored from the parser (e.g. TODO(andronat):... || TODO:...) if and only if +// they are on one line! For multiple line or blocks that you want to ignore use ---. +// Any context after a --- is ignored. +// +// Those methods can be generated by using hack/update-swagger-docs.sh + +// AUTO-GENERATED FUNCTIONS START HERE +var map_Config = map[string]string{ + "": "Config contains the configuration and detailed condition status for the Samples Operator.", +} + +func (Config) SwaggerDoc() map[string]string { + return map_Config +} + +var map_ConfigCondition = map[string]string{ + "": "ConfigCondition captures various conditions of the Config as entries are processed.", + "type": "type of condition.", + "status": "status of the condition, one of True, False, Unknown.", + "lastUpdateTime": "lastUpdateTime is the last time this condition was updated.", + "lastTransitionTime": "lastTransitionTime is the last time the condition transitioned from one status to another.", + "reason": "reason is what caused the condition's last transition.", + "message": "message is a human readable message indicating details about the transition.", +} + +func (ConfigCondition) SwaggerDoc() map[string]string { + return map_ConfigCondition +} + +var map_ConfigSpec = map[string]string{ + "": "ConfigSpec contains the desired configuration and state for the Samples Operator, controlling various behavior around the imagestreams and templates it creates/updates in the openshift namespace.", + "managementState": "managementState is the top-level on/off switch for the operator. When \"Managed\", this operator processes config and manipulates the samples accordingly. When \"Unmanaged\", this operator ignores any updates to the resources it watches. When \"Removed\", it reacts the same way as it does when the Config object is deleted, meaning any ImageStreams or Templates it manages (i.e.
it honors the skipped lists) and the registry secret are deleted, along with the ConfigMap in the operator's namespace that represents the last config used to manipulate the samples.", + "samplesRegistry": "samplesRegistry allows for the specification of which registry is accessed by the ImageStreams for their image content. Defaults to the content in https://github.com/openshift/library that is pulled into this repository; since only OCP content is pulled, it typically defaults to registry.redhat.io.", + "architectures": "architectures determine which hardware architecture(s) to install, where x86_64, ppc64le, and s390x are the only supported choices currently.", + "skippedImagestreams": "skippedImagestreams specifies names of image streams that should NOT be created/updated. Admins can use this to delete content they don’t want. They will still have to manually delete the content but the operator will not recreate (or update) anything listed here.", + "skippedTemplates": "skippedTemplates specifies names of templates that should NOT be created/updated. Admins can use this to delete content they don’t want. They will still have to manually delete the content but the operator will not recreate (or update) anything listed here.", +} + +func (ConfigSpec) SwaggerDoc() map[string]string { + return map_ConfigSpec +} + +var map_ConfigStatus = map[string]string{ + "": "ConfigStatus contains the actual configuration in effect, as well as various details that describe the state of the Samples Operator.", + "managementState": "managementState reflects the current operational status of the on/off switch for the operator. This operator compares the ManagementState as part of determining that we are turning the operator back on (i.e. \"Managed\") when it was previously \"Unmanaged\".", + "conditions": "conditions represents the available maintenance status of the sample imagestreams and templates.", + "samplesRegistry": "samplesRegistry allows for the specification of which registry is accessed by the ImageStreams for their image content. Defaults to the content in https://github.com/openshift/library that is pulled into this repository; since only OCP content is pulled, it typically defaults to registry.redhat.io.", + "architectures": "architectures determine which hardware architecture(s) to install, where x86_64 and ppc64le are the supported choices.", + "skippedImagestreams": "skippedImagestreams specifies names of image streams that should NOT be created/updated. Admins can use this to delete content they don’t want. They will still have to manually delete the content but the operator will not recreate (or update) anything listed here.", + "skippedTemplates": "skippedTemplates specifies names of templates that should NOT be created/updated. Admins can use this to delete content they don’t want.
They will still have to manually delete the content but the operator will not recreate (or update) anything listed here.", + "version": "version is the value of the operator's payload-based version indicator when it was last successfully processed.", +} + +func (ConfigStatus) SwaggerDoc() map[string]string { + return map_ConfigStatus +} + +// AUTO-GENERATED FUNCTIONS END HERE diff --git a/vendor/github.com/openshift/api/security/install.go b/vendor/github.com/openshift/api/security/install.go new file mode 100644 index 0000000000000000000000000000000000000000..c2b04c43298ffc0fb17ba6e7918821a508c8a0ad --- /dev/null +++ b/vendor/github.com/openshift/api/security/install.go @@ -0,0 +1,26 @@ +package security + +import ( + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" + + securityv1 "github.com/openshift/api/security/v1" +) + +const ( + GroupName = "security.openshift.io" +) + +var ( + schemeBuilder = runtime.NewSchemeBuilder(securityv1.Install) + // Install is a function which adds every version of this group to a scheme + Install = schemeBuilder.AddToScheme +) + +func Resource(resource string) schema.GroupResource { + return schema.GroupResource{Group: GroupName, Resource: resource} +} + +func Kind(kind string) schema.GroupKind { + return schema.GroupKind{Group: GroupName, Kind: kind} +} diff --git a/vendor/github.com/openshift/api/servicecertsigner/install.go b/vendor/github.com/openshift/api/servicecertsigner/install.go new file mode 100644 index 0000000000000000000000000000000000000000..98d891d34deed6378f0640f37ed4607f5689e08c --- /dev/null +++ b/vendor/github.com/openshift/api/servicecertsigner/install.go @@ -0,0 +1,26 @@ +package servicecertsigner + +import ( + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" + + servicecertsignerv1alpha1 "github.com/openshift/api/servicecertsigner/v1alpha1" +) + +const ( + GroupName = "servicecertsigner.config.openshift.io" +) + +var ( + schemeBuilder = runtime.NewSchemeBuilder(servicecertsignerv1alpha1.Install) + // Install is a function which adds every version of this group to a scheme + Install = schemeBuilder.AddToScheme +) + +func Resource(resource string) schema.GroupResource { + return schema.GroupResource{Group: GroupName, Resource: resource} +} + +func Kind(kind string) schema.GroupKind { + return schema.GroupKind{Group: GroupName, Kind: kind} +} diff --git a/vendor/github.com/openshift/api/servicecertsigner/v1alpha1/doc.go b/vendor/github.com/openshift/api/servicecertsigner/v1alpha1/doc.go new file mode 100644 index 0000000000000000000000000000000000000000..6ce02bdb3e52d96887b8de9a9b1d5785a1ce8779 --- /dev/null +++ b/vendor/github.com/openshift/api/servicecertsigner/v1alpha1/doc.go @@ -0,0 +1,6 @@ +// +k8s:deepcopy-gen=package,register +// +k8s:defaulter-gen=TypeMeta +// +k8s:openapi-gen=true + +// +groupName=servicecertsigner.config.openshift.io +package v1alpha1 diff --git a/vendor/github.com/openshift/api/servicecertsigner/v1alpha1/register.go b/vendor/github.com/openshift/api/servicecertsigner/v1alpha1/register.go new file mode 100644 index 0000000000000000000000000000000000000000..19ef421b2231f893af7f8c71b97e3539e3432a02 --- /dev/null +++ b/vendor/github.com/openshift/api/servicecertsigner/v1alpha1/register.go @@ -0,0 +1,40 @@ +package v1alpha1 + +import ( + configv1 "github.com/openshift/api/config/v1" + operatorsv1alpha1api "github.com/openshift/api/operator/v1alpha1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" +
"k8s.io/apimachinery/pkg/runtime/schema" +) + +var ( + GroupName = "servicecertsigner.config.openshift.io" + GroupVersion = schema.GroupVersion{Group: GroupName, Version: "v1alpha1"} + schemeBuilder = runtime.NewSchemeBuilder(addKnownTypes, configv1.Install, operatorsv1alpha1api.Install) + // Install is a function which adds this version to a scheme + Install = schemeBuilder.AddToScheme + + // SchemeGroupVersion generated code relies on this name + // Deprecated + SchemeGroupVersion = GroupVersion + // AddToScheme exists solely to keep the old generators creating valid code + // DEPRECATED + AddToScheme = schemeBuilder.AddToScheme +) + +// Resource generated code relies on this being here, but it logically belongs to the group +// DEPRECATED +func Resource(resource string) schema.GroupResource { + return schema.GroupResource{Group: GroupName, Resource: resource} +} + +func addKnownTypes(scheme *runtime.Scheme) error { + scheme.AddKnownTypes(GroupVersion, + &ServiceCertSignerOperatorConfig{}, + &ServiceCertSignerOperatorConfigList{}, + ) + metav1.AddToGroupVersion(scheme, GroupVersion) + + return nil +} diff --git a/vendor/github.com/openshift/api/servicecertsigner/v1alpha1/types.go b/vendor/github.com/openshift/api/servicecertsigner/v1alpha1/types.go new file mode 100644 index 0000000000000000000000000000000000000000..dcae13acfb7178c6828c4df90246475c87b7d9d6 --- /dev/null +++ b/vendor/github.com/openshift/api/servicecertsigner/v1alpha1/types.go @@ -0,0 +1,39 @@ +package v1alpha1 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + + operatorv1 "github.com/openshift/api/operator/v1" +) + +// +genclient +// +genclient:nonNamespaced +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// ServiceCertSignerOperatorConfig provides information to configure an operator to manage the service cert signing controllers +type ServiceCertSignerOperatorConfig struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata"` + + Spec ServiceCertSignerOperatorConfigSpec `json:"spec"` + Status ServiceCertSignerOperatorConfigStatus `json:"status"` +} + +type ServiceCertSignerOperatorConfigSpec struct { + operatorv1.OperatorSpec `json:",inline"` +} + +type ServiceCertSignerOperatorConfigStatus struct { + operatorv1.OperatorStatus `json:",inline"` +} + +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// ServiceCertSignerOperatorConfigList is a collection of items +type ServiceCertSignerOperatorConfigList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata,omitempty"` + + // Items contains the items + Items []ServiceCertSignerOperatorConfig `json:"items"` +} diff --git a/vendor/github.com/openshift/api/servicecertsigner/v1alpha1/zz_generated.deepcopy.go b/vendor/github.com/openshift/api/servicecertsigner/v1alpha1/zz_generated.deepcopy.go new file mode 100644 index 0000000000000000000000000000000000000000..9777591da420d5c9379363ef9078f8a9db2138d2 --- /dev/null +++ b/vendor/github.com/openshift/api/servicecertsigner/v1alpha1/zz_generated.deepcopy.go @@ -0,0 +1,104 @@ +// +build !ignore_autogenerated + +// Code generated by deepcopy-gen. DO NOT EDIT. + +package v1alpha1 + +import ( + runtime "k8s.io/apimachinery/pkg/runtime" +) + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ServiceCertSignerOperatorConfig) DeepCopyInto(out *ServiceCertSignerOperatorConfig) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ServiceCertSignerOperatorConfig. +func (in *ServiceCertSignerOperatorConfig) DeepCopy() *ServiceCertSignerOperatorConfig { + if in == nil { + return nil + } + out := new(ServiceCertSignerOperatorConfig) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *ServiceCertSignerOperatorConfig) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ServiceCertSignerOperatorConfigList) DeepCopyInto(out *ServiceCertSignerOperatorConfigList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]ServiceCertSignerOperatorConfig, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ServiceCertSignerOperatorConfigList. +func (in *ServiceCertSignerOperatorConfigList) DeepCopy() *ServiceCertSignerOperatorConfigList { + if in == nil { + return nil + } + out := new(ServiceCertSignerOperatorConfigList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *ServiceCertSignerOperatorConfigList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ServiceCertSignerOperatorConfigSpec) DeepCopyInto(out *ServiceCertSignerOperatorConfigSpec) { + *out = *in + in.OperatorSpec.DeepCopyInto(&out.OperatorSpec) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ServiceCertSignerOperatorConfigSpec. +func (in *ServiceCertSignerOperatorConfigSpec) DeepCopy() *ServiceCertSignerOperatorConfigSpec { + if in == nil { + return nil + } + out := new(ServiceCertSignerOperatorConfigSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ServiceCertSignerOperatorConfigStatus) DeepCopyInto(out *ServiceCertSignerOperatorConfigStatus) { + *out = *in + in.OperatorStatus.DeepCopyInto(&out.OperatorStatus) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ServiceCertSignerOperatorConfigStatus. 
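
Because ServiceCertSignerOperatorConfigSpec and Status embed operatorv1.OperatorSpec and operatorv1.OperatorStatus inline, the management state is set directly on the spec. A construction sketch; the object name "cluster" is an assumption for illustration, not mandated by these types:

package example

import (
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"

	operatorv1 "github.com/openshift/api/operator/v1"
	scsv1alpha1 "github.com/openshift/api/servicecertsigner/v1alpha1"
)

// managedConfig builds a ServiceCertSignerOperatorConfig whose embedded
// OperatorSpec asks for "Managed" behavior. Illustrative helper.
func managedConfig() *scsv1alpha1.ServiceCertSignerOperatorConfig {
	return &scsv1alpha1.ServiceCertSignerOperatorConfig{
		ObjectMeta: metav1.ObjectMeta{Name: "cluster"}, // illustrative name
		Spec: scsv1alpha1.ServiceCertSignerOperatorConfigSpec{
			OperatorSpec: operatorv1.OperatorSpec{ManagementState: operatorv1.Managed},
		},
	}
}
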
+func (in *ServiceCertSignerOperatorConfigStatus) DeepCopy() *ServiceCertSignerOperatorConfigStatus { + if in == nil { + return nil + } + out := new(ServiceCertSignerOperatorConfigStatus) + in.DeepCopyInto(out) + return out +} diff --git a/vendor/github.com/openshift/api/servicecertsigner/v1alpha1/zz_generated.swagger_doc_generated.go b/vendor/github.com/openshift/api/servicecertsigner/v1alpha1/zz_generated.swagger_doc_generated.go new file mode 100644 index 0000000000000000000000000000000000000000..d22bd46ef3d9be22c1e1aa6c1dade60921533608 --- /dev/null +++ b/vendor/github.com/openshift/api/servicecertsigner/v1alpha1/zz_generated.swagger_doc_generated.go @@ -0,0 +1,31 @@ +package v1alpha1 + +// This file contains a collection of methods that can be used from go-restful to +// generate Swagger API documentation for its models. Please read this PR for more +// information on the implementation: https://github.com/emicklei/go-restful/pull/215 +// +// TODOs are ignored from the parser (e.g. TODO(andronat):... || TODO:...) if and only if +// they are on one line! For multiple line or blocks that you want to ignore use ---. +// Any context after a --- is ignored. +// +// Those methods can be generated by using hack/update-swagger-docs.sh + +// AUTO-GENERATED FUNCTIONS START HERE +var map_ServiceCertSignerOperatorConfig = map[string]string{ + "": "ServiceCertSignerOperatorConfig provides information to configure an operator to manage the service cert signing controllers", +} + +func (ServiceCertSignerOperatorConfig) SwaggerDoc() map[string]string { + return map_ServiceCertSignerOperatorConfig +} + +var map_ServiceCertSignerOperatorConfigList = map[string]string{ + "": "ServiceCertSignerOperatorConfigList is a collection of items", + "items": "Items contains the items", +} + +func (ServiceCertSignerOperatorConfigList) SwaggerDoc() map[string]string { + return map_ServiceCertSignerOperatorConfigList +} + +// AUTO-GENERATED FUNCTIONS END HERE diff --git a/vendor/github.com/openshift/api/template/OWNERS b/vendor/github.com/openshift/api/template/OWNERS new file mode 100644 index 0000000000000000000000000000000000000000..c1ece8b2136fa1403225d55266da33c80a28610a --- /dev/null +++ b/vendor/github.com/openshift/api/template/OWNERS @@ -0,0 +1,4 @@ +reviewers: + - bparees + - gabemontero + - jim-minter diff --git a/vendor/github.com/openshift/api/template/install.go b/vendor/github.com/openshift/api/template/install.go new file mode 100644 index 0000000000000000000000000000000000000000..8a69398dd09a2f23c3dc230244d99de45ca79a84 --- /dev/null +++ b/vendor/github.com/openshift/api/template/install.go @@ -0,0 +1,26 @@ +package template + +import ( + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" + + templatev1 "github.com/openshift/api/template/v1" +) + +const ( + GroupName = "template.openshift.io" +) + +var ( + schemeBuilder = runtime.NewSchemeBuilder(templatev1.Install) + // Install is a function which adds every version of this group to a scheme + Install = schemeBuilder.AddToScheme +) + +func Resource(resource string) schema.GroupResource { + return schema.GroupResource{Group: GroupName, Resource: resource} +} + +func Kind(kind string) schema.GroupKind { + return schema.GroupKind{Group: GroupName, Kind: kind} +} diff --git a/vendor/github.com/openshift/api/template/v1/codec.go b/vendor/github.com/openshift/api/template/v1/codec.go new file mode 100644 index 0000000000000000000000000000000000000000..9e9177ed6a7da193b48b3e1bb96e14046ce98fec --- /dev/null +++ 
b/vendor/github.com/openshift/api/template/v1/codec.go @@ -0,0 +1,33 @@ +package v1 + +import ( + "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" + "k8s.io/apimachinery/pkg/runtime" + + "github.com/openshift/api/pkg/serialization" +) + +var _ runtime.NestedObjectDecoder = &Template{} +var _ runtime.NestedObjectEncoder = &Template{} + +// DecodeNestedObjects decodes the object as a runtime.Unknown with JSON content. +func (c *Template) DecodeNestedObjects(d runtime.Decoder) error { + for i := range c.Objects { + if c.Objects[i].Object != nil { + continue + } + c.Objects[i].Object = &runtime.Unknown{ + ContentType: "application/json", + Raw: c.Objects[i].Raw, + } + } + return nil +} +func (c *Template) EncodeNestedObjects(e runtime.Encoder) error { + for i := range c.Objects { + if err := serialization.EncodeNestedRawExtension(unstructured.UnstructuredJSONScheme, &c.Objects[i]); err != nil { + return err + } + } + return nil +} diff --git a/vendor/github.com/openshift/api/template/v1/consts.go b/vendor/github.com/openshift/api/template/v1/consts.go new file mode 100644 index 0000000000000000000000000000000000000000..cc8b49d55f681e834edf24988d322f2469a12632 --- /dev/null +++ b/vendor/github.com/openshift/api/template/v1/consts.go @@ -0,0 +1,16 @@ +package v1 + +const ( + // TemplateInstanceFinalizer is used to clean up the objects created by the template instance, + // when the template instance is deleted. + TemplateInstanceFinalizer = "template.openshift.io/finalizer" + + // TemplateInstanceOwner is a label applied to all objects created from a template instance + // which contains the uid of the template instance. + TemplateInstanceOwner = "template.openshift.io/template-instance-owner" + + // WaitForReadyAnnotation indicates that the TemplateInstance controller + // should wait for the object to be ready before reporting the template + // instantiation complete. + WaitForReadyAnnotation = "template.alpha.openshift.io/wait-for-ready" +) diff --git a/vendor/github.com/openshift/api/template/v1/doc.go b/vendor/github.com/openshift/api/template/v1/doc.go new file mode 100644 index 0000000000000000000000000000000000000000..34f9f8d45545666624ce3ea88cb55995982eb737 --- /dev/null +++ b/vendor/github.com/openshift/api/template/v1/doc.go @@ -0,0 +1,8 @@ +// +k8s:deepcopy-gen=package,register +// +k8s:conversion-gen=github.com/openshift/origin/pkg/template/apis/template +// +k8s:defaulter-gen=TypeMeta +// +k8s:openapi-gen=true + +// +groupName=template.openshift.io +// Package v1 is the v1 version of the API. +package v1 diff --git a/vendor/github.com/openshift/api/template/v1/generated.pb.go b/vendor/github.com/openshift/api/template/v1/generated.pb.go new file mode 100644 index 0000000000000000000000000000000000000000..64d2c7159377a044df54078db5384cda7f4c58e1 --- /dev/null +++ b/vendor/github.com/openshift/api/template/v1/generated.pb.go @@ -0,0 +1,4183 @@ +// Code generated by protoc-gen-gogo. DO NOT EDIT. +// source: github.com/openshift/api/template/v1/generated.proto + +package v1 + +import ( + fmt "fmt" + + io "io" + + proto "github.com/gogo/protobuf/proto" + github_com_gogo_protobuf_sortkeys "github.com/gogo/protobuf/sortkeys" + k8s_io_api_core_v1 "k8s.io/api/core/v1" + v11 "k8s.io/api/core/v1" + runtime "k8s.io/apimachinery/pkg/runtime" + + math "math" + math_bits "math/bits" + reflect "reflect" + strings "strings" +) + +// Reference imports to suppress errors if they are not otherwise used. 
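
The codec above defers parsing by wrapping each nested template object's raw JSON in a runtime.Unknown. Pulling those payloads back out after decoding a Template might look like this sketch, assuming the decode path invoked DecodeNestedObjects:

package example

import (
	"k8s.io/apimachinery/pkg/runtime"

	templatev1 "github.com/openshift/api/template/v1"
)

// nestedRawJSON collects the raw JSON payloads that DecodeNestedObjects
// wrapped in runtime.Unknown values. Illustrative helper.
func nestedRawJSON(tmpl *templatev1.Template) [][]byte {
	raws := make([][]byte, 0, len(tmpl.Objects))
	for _, obj := range tmpl.Objects {
		if u, ok := obj.Object.(*runtime.Unknown); ok {
			raws = append(raws, u.Raw)
		}
	}
	return raws
}
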
+var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.GoGoProtoPackageIsVersion2 // please upgrade the proto package + +func (m *BrokerTemplateInstance) Reset() { *m = BrokerTemplateInstance{} } +func (*BrokerTemplateInstance) ProtoMessage() {} +func (*BrokerTemplateInstance) Descriptor() ([]byte, []int) { + return fileDescriptor_8d3ee9f55fa8363e, []int{0} +} +func (m *BrokerTemplateInstance) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *BrokerTemplateInstance) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *BrokerTemplateInstance) XXX_Merge(src proto.Message) { + xxx_messageInfo_BrokerTemplateInstance.Merge(m, src) +} +func (m *BrokerTemplateInstance) XXX_Size() int { + return m.Size() +} +func (m *BrokerTemplateInstance) XXX_DiscardUnknown() { + xxx_messageInfo_BrokerTemplateInstance.DiscardUnknown(m) +} + +var xxx_messageInfo_BrokerTemplateInstance proto.InternalMessageInfo + +func (m *BrokerTemplateInstanceList) Reset() { *m = BrokerTemplateInstanceList{} } +func (*BrokerTemplateInstanceList) ProtoMessage() {} +func (*BrokerTemplateInstanceList) Descriptor() ([]byte, []int) { + return fileDescriptor_8d3ee9f55fa8363e, []int{1} +} +func (m *BrokerTemplateInstanceList) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *BrokerTemplateInstanceList) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *BrokerTemplateInstanceList) XXX_Merge(src proto.Message) { + xxx_messageInfo_BrokerTemplateInstanceList.Merge(m, src) +} +func (m *BrokerTemplateInstanceList) XXX_Size() int { + return m.Size() +} +func (m *BrokerTemplateInstanceList) XXX_DiscardUnknown() { + xxx_messageInfo_BrokerTemplateInstanceList.DiscardUnknown(m) +} + +var xxx_messageInfo_BrokerTemplateInstanceList proto.InternalMessageInfo + +func (m *BrokerTemplateInstanceSpec) Reset() { *m = BrokerTemplateInstanceSpec{} } +func (*BrokerTemplateInstanceSpec) ProtoMessage() {} +func (*BrokerTemplateInstanceSpec) Descriptor() ([]byte, []int) { + return fileDescriptor_8d3ee9f55fa8363e, []int{2} +} +func (m *BrokerTemplateInstanceSpec) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *BrokerTemplateInstanceSpec) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *BrokerTemplateInstanceSpec) XXX_Merge(src proto.Message) { + xxx_messageInfo_BrokerTemplateInstanceSpec.Merge(m, src) +} +func (m *BrokerTemplateInstanceSpec) XXX_Size() int { + return m.Size() +} +func (m *BrokerTemplateInstanceSpec) XXX_DiscardUnknown() { + xxx_messageInfo_BrokerTemplateInstanceSpec.DiscardUnknown(m) +} + +var xxx_messageInfo_BrokerTemplateInstanceSpec proto.InternalMessageInfo + +func (m *ExtraValue) Reset() { *m = ExtraValue{} } +func (*ExtraValue) ProtoMessage() {} +func (*ExtraValue) Descriptor() ([]byte, []int) { + return fileDescriptor_8d3ee9f55fa8363e, []int{3} +} +func (m *ExtraValue) XXX_Unmarshal(b []byte) error { + 
return m.Unmarshal(b) +} +func (m *ExtraValue) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *ExtraValue) XXX_Merge(src proto.Message) { + xxx_messageInfo_ExtraValue.Merge(m, src) +} +func (m *ExtraValue) XXX_Size() int { + return m.Size() +} +func (m *ExtraValue) XXX_DiscardUnknown() { + xxx_messageInfo_ExtraValue.DiscardUnknown(m) +} + +var xxx_messageInfo_ExtraValue proto.InternalMessageInfo + +func (m *Parameter) Reset() { *m = Parameter{} } +func (*Parameter) ProtoMessage() {} +func (*Parameter) Descriptor() ([]byte, []int) { + return fileDescriptor_8d3ee9f55fa8363e, []int{4} +} +func (m *Parameter) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *Parameter) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *Parameter) XXX_Merge(src proto.Message) { + xxx_messageInfo_Parameter.Merge(m, src) +} +func (m *Parameter) XXX_Size() int { + return m.Size() +} +func (m *Parameter) XXX_DiscardUnknown() { + xxx_messageInfo_Parameter.DiscardUnknown(m) +} + +var xxx_messageInfo_Parameter proto.InternalMessageInfo + +func (m *Template) Reset() { *m = Template{} } +func (*Template) ProtoMessage() {} +func (*Template) Descriptor() ([]byte, []int) { + return fileDescriptor_8d3ee9f55fa8363e, []int{5} +} +func (m *Template) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *Template) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *Template) XXX_Merge(src proto.Message) { + xxx_messageInfo_Template.Merge(m, src) +} +func (m *Template) XXX_Size() int { + return m.Size() +} +func (m *Template) XXX_DiscardUnknown() { + xxx_messageInfo_Template.DiscardUnknown(m) +} + +var xxx_messageInfo_Template proto.InternalMessageInfo + +func (m *TemplateInstance) Reset() { *m = TemplateInstance{} } +func (*TemplateInstance) ProtoMessage() {} +func (*TemplateInstance) Descriptor() ([]byte, []int) { + return fileDescriptor_8d3ee9f55fa8363e, []int{6} +} +func (m *TemplateInstance) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *TemplateInstance) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *TemplateInstance) XXX_Merge(src proto.Message) { + xxx_messageInfo_TemplateInstance.Merge(m, src) +} +func (m *TemplateInstance) XXX_Size() int { + return m.Size() +} +func (m *TemplateInstance) XXX_DiscardUnknown() { + xxx_messageInfo_TemplateInstance.DiscardUnknown(m) +} + +var xxx_messageInfo_TemplateInstance proto.InternalMessageInfo + +func (m *TemplateInstanceCondition) Reset() { *m = TemplateInstanceCondition{} } +func (*TemplateInstanceCondition) ProtoMessage() {} +func (*TemplateInstanceCondition) Descriptor() ([]byte, []int) { + return fileDescriptor_8d3ee9f55fa8363e, []int{7} +} +func (m *TemplateInstanceCondition) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *TemplateInstanceCondition) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m 
*TemplateInstanceCondition) XXX_Merge(src proto.Message) { + xxx_messageInfo_TemplateInstanceCondition.Merge(m, src) +} +func (m *TemplateInstanceCondition) XXX_Size() int { + return m.Size() +} +func (m *TemplateInstanceCondition) XXX_DiscardUnknown() { + xxx_messageInfo_TemplateInstanceCondition.DiscardUnknown(m) +} + +var xxx_messageInfo_TemplateInstanceCondition proto.InternalMessageInfo + +func (m *TemplateInstanceList) Reset() { *m = TemplateInstanceList{} } +func (*TemplateInstanceList) ProtoMessage() {} +func (*TemplateInstanceList) Descriptor() ([]byte, []int) { + return fileDescriptor_8d3ee9f55fa8363e, []int{8} +} +func (m *TemplateInstanceList) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *TemplateInstanceList) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *TemplateInstanceList) XXX_Merge(src proto.Message) { + xxx_messageInfo_TemplateInstanceList.Merge(m, src) +} +func (m *TemplateInstanceList) XXX_Size() int { + return m.Size() +} +func (m *TemplateInstanceList) XXX_DiscardUnknown() { + xxx_messageInfo_TemplateInstanceList.DiscardUnknown(m) +} + +var xxx_messageInfo_TemplateInstanceList proto.InternalMessageInfo + +func (m *TemplateInstanceObject) Reset() { *m = TemplateInstanceObject{} } +func (*TemplateInstanceObject) ProtoMessage() {} +func (*TemplateInstanceObject) Descriptor() ([]byte, []int) { + return fileDescriptor_8d3ee9f55fa8363e, []int{9} +} +func (m *TemplateInstanceObject) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *TemplateInstanceObject) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *TemplateInstanceObject) XXX_Merge(src proto.Message) { + xxx_messageInfo_TemplateInstanceObject.Merge(m, src) +} +func (m *TemplateInstanceObject) XXX_Size() int { + return m.Size() +} +func (m *TemplateInstanceObject) XXX_DiscardUnknown() { + xxx_messageInfo_TemplateInstanceObject.DiscardUnknown(m) +} + +var xxx_messageInfo_TemplateInstanceObject proto.InternalMessageInfo + +func (m *TemplateInstanceRequester) Reset() { *m = TemplateInstanceRequester{} } +func (*TemplateInstanceRequester) ProtoMessage() {} +func (*TemplateInstanceRequester) Descriptor() ([]byte, []int) { + return fileDescriptor_8d3ee9f55fa8363e, []int{10} +} +func (m *TemplateInstanceRequester) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *TemplateInstanceRequester) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *TemplateInstanceRequester) XXX_Merge(src proto.Message) { + xxx_messageInfo_TemplateInstanceRequester.Merge(m, src) +} +func (m *TemplateInstanceRequester) XXX_Size() int { + return m.Size() +} +func (m *TemplateInstanceRequester) XXX_DiscardUnknown() { + xxx_messageInfo_TemplateInstanceRequester.DiscardUnknown(m) +} + +var xxx_messageInfo_TemplateInstanceRequester proto.InternalMessageInfo + +func (m *TemplateInstanceSpec) Reset() { *m = TemplateInstanceSpec{} } +func (*TemplateInstanceSpec) ProtoMessage() {} +func (*TemplateInstanceSpec) Descriptor() ([]byte, []int) { + return fileDescriptor_8d3ee9f55fa8363e, []int{11} +} +func (m *TemplateInstanceSpec) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m 
*TemplateInstanceSpec) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *TemplateInstanceSpec) XXX_Merge(src proto.Message) { + xxx_messageInfo_TemplateInstanceSpec.Merge(m, src) +} +func (m *TemplateInstanceSpec) XXX_Size() int { + return m.Size() +} +func (m *TemplateInstanceSpec) XXX_DiscardUnknown() { + xxx_messageInfo_TemplateInstanceSpec.DiscardUnknown(m) +} + +var xxx_messageInfo_TemplateInstanceSpec proto.InternalMessageInfo + +func (m *TemplateInstanceStatus) Reset() { *m = TemplateInstanceStatus{} } +func (*TemplateInstanceStatus) ProtoMessage() {} +func (*TemplateInstanceStatus) Descriptor() ([]byte, []int) { + return fileDescriptor_8d3ee9f55fa8363e, []int{12} +} +func (m *TemplateInstanceStatus) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *TemplateInstanceStatus) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *TemplateInstanceStatus) XXX_Merge(src proto.Message) { + xxx_messageInfo_TemplateInstanceStatus.Merge(m, src) +} +func (m *TemplateInstanceStatus) XXX_Size() int { + return m.Size() +} +func (m *TemplateInstanceStatus) XXX_DiscardUnknown() { + xxx_messageInfo_TemplateInstanceStatus.DiscardUnknown(m) +} + +var xxx_messageInfo_TemplateInstanceStatus proto.InternalMessageInfo + +func (m *TemplateList) Reset() { *m = TemplateList{} } +func (*TemplateList) ProtoMessage() {} +func (*TemplateList) Descriptor() ([]byte, []int) { + return fileDescriptor_8d3ee9f55fa8363e, []int{13} +} +func (m *TemplateList) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *TemplateList) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *TemplateList) XXX_Merge(src proto.Message) { + xxx_messageInfo_TemplateList.Merge(m, src) +} +func (m *TemplateList) XXX_Size() int { + return m.Size() +} +func (m *TemplateList) XXX_DiscardUnknown() { + xxx_messageInfo_TemplateList.DiscardUnknown(m) +} + +var xxx_messageInfo_TemplateList proto.InternalMessageInfo + +func init() { + proto.RegisterType((*BrokerTemplateInstance)(nil), "github.com.openshift.api.template.v1.BrokerTemplateInstance") + proto.RegisterType((*BrokerTemplateInstanceList)(nil), "github.com.openshift.api.template.v1.BrokerTemplateInstanceList") + proto.RegisterType((*BrokerTemplateInstanceSpec)(nil), "github.com.openshift.api.template.v1.BrokerTemplateInstanceSpec") + proto.RegisterType((*ExtraValue)(nil), "github.com.openshift.api.template.v1.ExtraValue") + proto.RegisterType((*Parameter)(nil), "github.com.openshift.api.template.v1.Parameter") + proto.RegisterType((*Template)(nil), "github.com.openshift.api.template.v1.Template") + proto.RegisterMapType((map[string]string)(nil), "github.com.openshift.api.template.v1.Template.LabelsEntry") + proto.RegisterType((*TemplateInstance)(nil), "github.com.openshift.api.template.v1.TemplateInstance") + proto.RegisterType((*TemplateInstanceCondition)(nil), "github.com.openshift.api.template.v1.TemplateInstanceCondition") + proto.RegisterType((*TemplateInstanceList)(nil), "github.com.openshift.api.template.v1.TemplateInstanceList") + proto.RegisterType((*TemplateInstanceObject)(nil), "github.com.openshift.api.template.v1.TemplateInstanceObject") + 
proto.RegisterType((*TemplateInstanceRequester)(nil), "github.com.openshift.api.template.v1.TemplateInstanceRequester") + proto.RegisterMapType((map[string]ExtraValue)(nil), "github.com.openshift.api.template.v1.TemplateInstanceRequester.ExtraEntry") + proto.RegisterType((*TemplateInstanceSpec)(nil), "github.com.openshift.api.template.v1.TemplateInstanceSpec") + proto.RegisterType((*TemplateInstanceStatus)(nil), "github.com.openshift.api.template.v1.TemplateInstanceStatus") + proto.RegisterType((*TemplateList)(nil), "github.com.openshift.api.template.v1.TemplateList") +} + +func init() { + proto.RegisterFile("github.com/openshift/api/template/v1/generated.proto", fileDescriptor_8d3ee9f55fa8363e) +} + +var fileDescriptor_8d3ee9f55fa8363e = []byte{ + // 1243 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xc4, 0x56, 0x4d, 0x8f, 0x1b, 0x45, + 0x13, 0xf6, 0xf8, 0x6b, 0xed, 0x76, 0x92, 0x77, 0xd5, 0x6f, 0x14, 0x0d, 0x96, 0x62, 0x5b, 0x13, + 0x84, 0x0c, 0x4a, 0xc6, 0x6c, 0x14, 0x42, 0x58, 0x21, 0x01, 0xc3, 0x6e, 0xa2, 0x85, 0x0d, 0xa0, + 0xde, 0x0d, 0x42, 0xb0, 0x07, 0xda, 0xe3, 0xb6, 0x77, 0xb2, 0x9e, 0x0f, 0xba, 0xdb, 0x26, 0xbe, + 0xe5, 0xc0, 0x0f, 0xe0, 0xc8, 0x91, 0x9f, 0xc0, 0x91, 0x13, 0x12, 0xb7, 0x3d, 0x86, 0x5b, 0x0e, + 0x60, 0xb1, 0xe6, 0xc4, 0x1f, 0x00, 0x29, 0x5c, 0x50, 0xf7, 0xf4, 0x7c, 0xf8, 0x8b, 0x78, 0x37, + 0x52, 0x72, 0x9b, 0xa9, 0xae, 0xe7, 0xa9, 0xae, 0xea, 0xea, 0xa7, 0x0b, 0xdc, 0xe8, 0x39, 0xfc, + 0x70, 0xd0, 0x36, 0x6d, 0xdf, 0x6d, 0xf9, 0x01, 0xf1, 0xd8, 0xa1, 0xd3, 0xe5, 0x2d, 0x1c, 0x38, + 0x2d, 0x4e, 0xdc, 0xa0, 0x8f, 0x39, 0x69, 0x0d, 0x37, 0x5a, 0x3d, 0xe2, 0x11, 0x8a, 0x39, 0xe9, + 0x98, 0x01, 0xf5, 0xb9, 0x0f, 0x5f, 0x4e, 0x50, 0x66, 0x8c, 0x32, 0x71, 0xe0, 0x98, 0x11, 0xca, + 0x1c, 0x6e, 0x54, 0xaf, 0xa5, 0xb8, 0x7b, 0x7e, 0xcf, 0x6f, 0x49, 0x70, 0x7b, 0xd0, 0x95, 0x7f, + 0xf2, 0x47, 0x7e, 0x85, 0xa4, 0x55, 0xe3, 0xe8, 0x16, 0x33, 0x1d, 0x5f, 0x06, 0xb7, 0x7d, 0xba, + 0x28, 0x70, 0xf5, 0x46, 0xe2, 0xe3, 0x62, 0xfb, 0xd0, 0xf1, 0x08, 0x1d, 0xb5, 0x82, 0xa3, 0x9e, + 0x30, 0xb0, 0x96, 0x4b, 0x38, 0x5e, 0x84, 0x6a, 0x2d, 0x43, 0xd1, 0x81, 0xc7, 0x1d, 0x97, 0xcc, + 0x01, 0x6e, 0x3e, 0x0d, 0xc0, 0xec, 0x43, 0xe2, 0xe2, 0x59, 0x9c, 0x31, 0xd6, 0xc0, 0x25, 0x8b, + 0xfa, 0x47, 0x84, 0xee, 0xab, 0x3a, 0xec, 0x78, 0x8c, 0x63, 0xcf, 0x26, 0xf0, 0x4b, 0x50, 0x12, + 0xdb, 0xeb, 0x60, 0x8e, 0x75, 0xad, 0xa1, 0x35, 0x2b, 0xd7, 0x5f, 0x37, 0xc3, 0x28, 0x66, 0x3a, + 0x8a, 0x19, 0x1c, 0xf5, 0x84, 0x81, 0x99, 0xc2, 0xdb, 0x1c, 0x6e, 0x98, 0x1f, 0xb7, 0xef, 0x13, + 0x9b, 0xdf, 0x25, 0x1c, 0x5b, 0xf0, 0x78, 0x5c, 0xcf, 0x4c, 0xc6, 0x75, 0x90, 0xd8, 0x50, 0xcc, + 0x0a, 0xdb, 0x20, 0xcf, 0x02, 0x62, 0xeb, 0x59, 0xc9, 0xfe, 0xae, 0xb9, 0xca, 0x19, 0x99, 0x8b, + 0x77, 0xbb, 0x17, 0x10, 0xdb, 0x3a, 0xa7, 0xa2, 0xe5, 0xc5, 0x1f, 0x92, 0xdc, 0xc6, 0x6f, 0x1a, + 0xa8, 0x2e, 0x86, 0xec, 0x3a, 0x8c, 0xc3, 0x83, 0xb9, 0x24, 0xcd, 0xd5, 0x92, 0x14, 0x68, 0x99, + 0xe2, 0xba, 0x0a, 0x5a, 0x8a, 0x2c, 0xa9, 0x04, 0x31, 0x28, 0x38, 0x9c, 0xb8, 0x4c, 0xcf, 0x36, + 0x72, 0xcd, 0xca, 0xf5, 0xb7, 0x9f, 0x25, 0x43, 0xeb, 0xbc, 0x0a, 0x54, 0xd8, 0x11, 0x94, 0x28, + 0x64, 0x36, 0xbe, 0xc9, 0x2e, 0xcb, 0x4f, 0x14, 0x01, 0x3a, 0x60, 0x9d, 0xcf, 0xd8, 0x55, 0x9e, + 0x57, 0x52, 0x79, 0x9a, 0xa2, 0x7b, 0x93, 0xa3, 0x43, 0xa4, 0x4b, 0x28, 0x11, 0x31, 0x75, 0x15, + 0x73, 0x7d, 0x96, 0x1c, 0xcd, 0xd1, 0xc2, 0x0f, 0x41, 0x91, 0x11, 0x9b, 0x12, 0xae, 0xce, 0x73, + 0xa5, 0x00, 0x17, 0x54, 0x80, 0xe2, 0x9e, 0x84, 0x22, 0x45, 0x01, 0x4d, 0x00, 0xda, 0x8e, 0xd7, + 
0x71, 0xbc, 0xde, 0xce, 0x16, 0xd3, 0x73, 0x8d, 0x5c, 0xb3, 0x6c, 0x5d, 0x10, 0x8d, 0x64, 0xc5, + 0x56, 0x94, 0xf2, 0x30, 0xde, 0x04, 0x60, 0xfb, 0x01, 0xa7, 0xf8, 0x53, 0xdc, 0x1f, 0x10, 0x58, + 0x8f, 0xea, 0xae, 0x49, 0x60, 0x79, 0xb6, 0x6a, 0x9b, 0xa5, 0xef, 0xbe, 0xaf, 0x67, 0x1e, 0xfe, + 0xda, 0xc8, 0x18, 0x3f, 0x65, 0x41, 0xf9, 0x13, 0x4c, 0xb1, 0x4b, 0x38, 0xa1, 0xb0, 0x01, 0xf2, + 0x1e, 0x76, 0xc3, 0x12, 0x95, 0x93, 0x7e, 0xfa, 0x08, 0xbb, 0x04, 0xc9, 0x15, 0xf8, 0x06, 0xa8, + 0x74, 0x1c, 0x16, 0xf4, 0xf1, 0x48, 0x18, 0x65, 0xaa, 0x65, 0xeb, 0xff, 0xca, 0xb1, 0xb2, 0x95, + 0x2c, 0xa1, 0xb4, 0x9f, 0x84, 0x11, 0x66, 0x53, 0x27, 0xe0, 0x8e, 0xef, 0xe9, 0xb9, 0x19, 0x58, + 0xb2, 0x84, 0xd2, 0x7e, 0xf0, 0x0a, 0x28, 0x0c, 0x45, 0x46, 0x7a, 0x5e, 0x02, 0xe2, 0x16, 0x90, + 0x69, 0xa2, 0x70, 0x0d, 0x5e, 0x05, 0xa5, 0xe8, 0x5a, 0xeb, 0x05, 0xe9, 0x17, 0xf7, 0xe4, 0x1d, + 0x65, 0x47, 0xb1, 0x87, 0x48, 0xb1, 0x4b, 0x7d, 0x57, 0x2f, 0x4e, 0xa7, 0x78, 0x9b, 0xfa, 0x2e, + 0x92, 0x2b, 0x82, 0x8f, 0x92, 0xaf, 0x06, 0x0e, 0x25, 0x1d, 0x7d, 0xad, 0xa1, 0x35, 0x4b, 0x09, + 0x1f, 0x52, 0x76, 0x14, 0x7b, 0x18, 0xff, 0xe4, 0x40, 0x29, 0xea, 0x8e, 0xe7, 0xa0, 0x19, 0xaf, + 0x82, 0x35, 0x97, 0x30, 0x86, 0x7b, 0x51, 0xed, 0xff, 0xa7, 0xdc, 0xd7, 0xee, 0x86, 0x66, 0x14, + 0xad, 0xc3, 0xcf, 0xc0, 0x9a, 0x2f, 0x29, 0xc2, 0x06, 0xaa, 0x5c, 0xbf, 0xb6, 0x74, 0x2f, 0x4a, + 0x25, 0x4d, 0x84, 0xbf, 0xde, 0x7e, 0xc0, 0x89, 0xc7, 0x1c, 0xdf, 0x4b, 0x98, 0xc3, 0x8d, 0x30, + 0x14, 0xd1, 0x41, 0x1b, 0x80, 0x20, 0xea, 0x19, 0xa6, 0xe7, 0x25, 0x79, 0x6b, 0xb5, 0xcb, 0x1d, + 0xf7, 0x5a, 0x92, 0x67, 0x6c, 0x62, 0x28, 0x45, 0x0b, 0x0f, 0x41, 0xb1, 0x8f, 0xdb, 0xa4, 0xcf, + 0xf4, 0x82, 0x0c, 0xb0, 0xb9, 0x5a, 0x80, 0xe8, 0x2c, 0xcc, 0x5d, 0x09, 0xde, 0xf6, 0x38, 0x1d, + 0x59, 0x17, 0x55, 0xac, 0x73, 0x61, 0x2a, 0xe1, 0x12, 0x52, 0xfc, 0xd5, 0xb7, 0x40, 0x25, 0xe5, + 0x0c, 0xd7, 0x41, 0xee, 0x88, 0x8c, 0xc2, 0x3b, 0x80, 0xc4, 0x27, 0xbc, 0x18, 0xb5, 0xa1, 0x2c, + 0xb9, 0xea, 0xbb, 0xcd, 0xec, 0x2d, 0xcd, 0xf8, 0x31, 0x0b, 0xd6, 0x5f, 0xc0, 0xcb, 0x71, 0x30, + 0xf5, 0x72, 0x9c, 0xb2, 0x32, 0x4f, 0x7b, 0x33, 0x60, 0x07, 0x14, 0x19, 0xc7, 0x7c, 0xc0, 0xe4, + 0x3d, 0x5d, 0x59, 0xb7, 0xe7, 0xf8, 0x25, 0x47, 0x4a, 0xe2, 0xe4, 0x3f, 0x52, 0xdc, 0xc6, 0xdf, + 0x59, 0xf0, 0xd2, 0x2c, 0xe4, 0x7d, 0xdf, 0xeb, 0x38, 0xf2, 0xe6, 0xbf, 0x07, 0xf2, 0x7c, 0x14, + 0x44, 0x4a, 0x74, 0x2d, 0xda, 0xe5, 0xfe, 0x28, 0x20, 0x4f, 0xc6, 0xf5, 0xcb, 0x4b, 0x81, 0xc2, + 0x01, 0x49, 0x28, 0xdc, 0x8d, 0xd3, 0x08, 0x6f, 0xca, 0x8d, 0xe9, 0x8d, 0x3c, 0x19, 0xd7, 0x17, + 0x0c, 0x30, 0x66, 0xcc, 0x34, 0xbd, 0x5d, 0x38, 0x04, 0xb0, 0x8f, 0x19, 0xdf, 0xa7, 0xd8, 0x63, + 0x61, 0x24, 0xc7, 0x25, 0xaa, 0x40, 0xaf, 0xad, 0x76, 0xbc, 0x02, 0x61, 0x55, 0xd5, 0x2e, 0xe0, + 0xee, 0x1c, 0x1b, 0x5a, 0x10, 0x01, 0xbe, 0x02, 0x8a, 0x94, 0x60, 0xe6, 0x7b, 0x4a, 0x03, 0xe3, + 0x72, 0x22, 0x69, 0x45, 0x6a, 0x35, 0x2d, 0x0c, 0x85, 0xff, 0x16, 0x06, 0xe3, 0x17, 0x0d, 0x5c, + 0x7c, 0x01, 0xd3, 0xc0, 0x17, 0xd3, 0xd3, 0xc0, 0xcd, 0xb3, 0x75, 0xd5, 0x92, 0x39, 0xe0, 0x00, + 0x5c, 0x9a, 0xf5, 0x0c, 0x6f, 0x0e, 0xb4, 0x40, 0x8e, 0x92, 0xee, 0x69, 0x5e, 0xfd, 0x8a, 0x8a, + 0x90, 0x43, 0xa4, 0x8b, 0x04, 0xd8, 0xf8, 0x73, 0x41, 0xaf, 0x8a, 0xb7, 0x80, 0x30, 0xf1, 0x6a, + 0x5e, 0x05, 0xa5, 0x01, 0x23, 0x34, 0xf5, 0x72, 0xc6, 0x65, 0xb8, 0xa7, 0xec, 0x28, 0xf6, 0x80, + 0x97, 0x41, 0x6e, 0xe0, 0x74, 0x54, 0x4f, 0xc6, 0xa1, 0xee, 0xed, 0x6c, 0x21, 0x61, 0x87, 0x06, + 0x28, 0xf6, 0xa8, 0x3f, 0x08, 0xa2, 0x57, 0x1f, 0x88, 0xb3, 0xbe, 0x23, 0x2d, 0x48, 0xad, 0x40, + 0x1f, 0x14, 0x88, 0x78, 
0xed, 0x95, 0xf4, 0x7e, 0x70, 0xb6, 0x4a, 0xc6, 0x09, 0x98, 0x72, 0x74, + 0x08, 0x95, 0x32, 0xae, 0xae, 0xb4, 0xa1, 0x30, 0x4e, 0xf5, 0xbe, 0x1a, 0x2f, 0x96, 0x09, 0xe4, + 0xed, 0xb4, 0x40, 0x0a, 0xb9, 0x5b, 0x69, 0x43, 0xc9, 0xc4, 0x92, 0x96, 0xd4, 0x1f, 0xb2, 0xf3, + 0xdd, 0x29, 0x67, 0xb9, 0x03, 0x50, 0x8a, 0xd0, 0x71, 0x77, 0x9e, 0x2a, 0xf1, 0xe4, 0x58, 0x22, + 0x0b, 0x8a, 0x19, 0xa5, 0x5a, 0xa4, 0xc7, 0xb7, 0xe6, 0xa2, 0x4e, 0xd9, 0xf5, 0x6d, 0xdc, 0x9f, + 0x6d, 0x17, 0xb0, 0x60, 0x7e, 0xeb, 0x83, 0x32, 0x8d, 0xca, 0xab, 0x44, 0xe2, 0x9d, 0x67, 0x3c, + 0x25, 0xeb, 0xfc, 0x64, 0x5c, 0x2f, 0xc7, 0xbf, 0x28, 0x09, 0x60, 0xfc, 0xa5, 0xcd, 0x77, 0x7f, + 0x28, 0x5f, 0x90, 0x01, 0x60, 0x47, 0x8a, 0x16, 0xce, 0x83, 0x67, 0xde, 0x49, 0xac, 0x8c, 0xc9, + 0xe3, 0x14, 0x9b, 0x18, 0x4a, 0x85, 0x81, 0xbd, 0x64, 0xf2, 0x38, 0xd5, 0xe4, 0xbf, 0xf8, 0x06, + 0x2f, 0x1f, 0x44, 0x8c, 0x9f, 0x35, 0x70, 0x2e, 0x02, 0x3d, 0x07, 0x05, 0xdb, 0x9b, 0x56, 0xb0, + 0xd3, 0xb6, 0xdf, 0x42, 0xe5, 0xb2, 0x9a, 0xc7, 0x27, 0xb5, 0xcc, 0xa3, 0x93, 0x5a, 0xe6, 0xf1, + 0x49, 0x2d, 0xf3, 0x70, 0x52, 0xd3, 0x8e, 0x27, 0x35, 0xed, 0xd1, 0xa4, 0xa6, 0x3d, 0x9e, 0xd4, + 0xb4, 0xdf, 0x27, 0x35, 0xed, 0xdb, 0x3f, 0x6a, 0x99, 0xcf, 0xb3, 0xc3, 0x8d, 0x7f, 0x03, 0x00, + 0x00, 0xff, 0xff, 0x33, 0xf4, 0x09, 0xac, 0xfb, 0x0f, 0x00, 0x00, +} + +func (m *BrokerTemplateInstance) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *BrokerTemplateInstance) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *BrokerTemplateInstance) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + { + size, err := m.Spec.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + { + size, err := m.ObjectMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *BrokerTemplateInstanceList) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *BrokerTemplateInstanceList) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *BrokerTemplateInstanceList) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.Items) > 0 { + for iNdEx := len(m.Items) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Items[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + } + { + size, err := m.ListMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *BrokerTemplateInstanceSpec) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *BrokerTemplateInstanceSpec) MarshalTo(dAtA []byte) (int, 
error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *BrokerTemplateInstanceSpec) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.BindingIDs) > 0 { + for iNdEx := len(m.BindingIDs) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.BindingIDs[iNdEx]) + copy(dAtA[i:], m.BindingIDs[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.BindingIDs[iNdEx]))) + i-- + dAtA[i] = 0x1a + } + } + { + size, err := m.Secret.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + { + size, err := m.TemplateInstance.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m ExtraValue) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m ExtraValue) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m ExtraValue) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m) > 0 { + for iNdEx := len(m) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m[iNdEx]) + copy(dAtA[i:], m[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(m[iNdEx]))) + i-- + dAtA[i] = 0xa + } + } + return len(dAtA) - i, nil +} + +func (m *Parameter) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Parameter) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *Parameter) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + i-- + if m.Required { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x38 + i -= len(m.From) + copy(dAtA[i:], m.From) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.From))) + i-- + dAtA[i] = 0x32 + i -= len(m.Generate) + copy(dAtA[i:], m.Generate) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Generate))) + i-- + dAtA[i] = 0x2a + i -= len(m.Value) + copy(dAtA[i:], m.Value) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Value))) + i-- + dAtA[i] = 0x22 + i -= len(m.Description) + copy(dAtA[i:], m.Description) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Description))) + i-- + dAtA[i] = 0x1a + i -= len(m.DisplayName) + copy(dAtA[i:], m.DisplayName) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.DisplayName))) + i-- + dAtA[i] = 0x12 + i -= len(m.Name) + copy(dAtA[i:], m.Name) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Name))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *Template) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Template) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *Template) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.ObjectLabels) > 0 { + keysForObjectLabels := make([]string, 0, len(m.ObjectLabels)) + for k := range 
m.ObjectLabels { + keysForObjectLabels = append(keysForObjectLabels, string(k)) + } + github_com_gogo_protobuf_sortkeys.Strings(keysForObjectLabels) + for iNdEx := len(keysForObjectLabels) - 1; iNdEx >= 0; iNdEx-- { + v := m.ObjectLabels[string(keysForObjectLabels[iNdEx])] + baseI := i + i -= len(v) + copy(dAtA[i:], v) + i = encodeVarintGenerated(dAtA, i, uint64(len(v))) + i-- + dAtA[i] = 0x12 + i -= len(keysForObjectLabels[iNdEx]) + copy(dAtA[i:], keysForObjectLabels[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(keysForObjectLabels[iNdEx]))) + i-- + dAtA[i] = 0xa + i = encodeVarintGenerated(dAtA, i, uint64(baseI-i)) + i-- + dAtA[i] = 0x2a + } + } + if len(m.Parameters) > 0 { + for iNdEx := len(m.Parameters) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Parameters[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x22 + } + } + if len(m.Objects) > 0 { + for iNdEx := len(m.Objects) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Objects[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a + } + } + i -= len(m.Message) + copy(dAtA[i:], m.Message) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Message))) + i-- + dAtA[i] = 0x12 + { + size, err := m.ObjectMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *TemplateInstance) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *TemplateInstance) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *TemplateInstance) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + { + size, err := m.Status.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a + { + size, err := m.Spec.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + { + size, err := m.ObjectMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *TemplateInstanceCondition) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *TemplateInstanceCondition) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *TemplateInstanceCondition) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + i -= len(m.Message) + copy(dAtA[i:], m.Message) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Message))) + i-- + dAtA[i] = 0x2a + i -= len(m.Reason) + copy(dAtA[i:], m.Reason) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Reason))) + i-- + dAtA[i] = 0x22 + { + size, err := m.LastTransitionTime.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + 
+			return 0, err
+		}
+		i -= size
+		i = encodeVarintGenerated(dAtA, i, uint64(size))
+	}
+	i--
+	dAtA[i] = 0x1a
+	i -= len(m.Status)
+	copy(dAtA[i:], m.Status)
+	i = encodeVarintGenerated(dAtA, i, uint64(len(m.Status)))
+	i--
+	dAtA[i] = 0x12
+	i -= len(m.Type)
+	copy(dAtA[i:], m.Type)
+	i = encodeVarintGenerated(dAtA, i, uint64(len(m.Type)))
+	i--
+	dAtA[i] = 0xa
+	return len(dAtA) - i, nil
+}
+
+func (m *TemplateInstanceList) Marshal() (dAtA []byte, err error) {
+	size := m.Size()
+	dAtA = make([]byte, size)
+	n, err := m.MarshalToSizedBuffer(dAtA[:size])
+	if err != nil {
+		return nil, err
+	}
+	return dAtA[:n], nil
+}
+
+func (m *TemplateInstanceList) MarshalTo(dAtA []byte) (int, error) {
+	size := m.Size()
+	return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *TemplateInstanceList) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+	i := len(dAtA)
+	_ = i
+	var l int
+	_ = l
+	if len(m.Items) > 0 {
+		for iNdEx := len(m.Items) - 1; iNdEx >= 0; iNdEx-- {
+			{
+				size, err := m.Items[iNdEx].MarshalToSizedBuffer(dAtA[:i])
+				if err != nil {
+					return 0, err
+				}
+				i -= size
+				i = encodeVarintGenerated(dAtA, i, uint64(size))
+			}
+			i--
+			dAtA[i] = 0x12
+		}
+	}
+	{
+		size, err := m.ListMeta.MarshalToSizedBuffer(dAtA[:i])
+		if err != nil {
+			return 0, err
+		}
+		i -= size
+		i = encodeVarintGenerated(dAtA, i, uint64(size))
+	}
+	i--
+	dAtA[i] = 0xa
+	return len(dAtA) - i, nil
+}
+
+func (m *TemplateInstanceObject) Marshal() (dAtA []byte, err error) {
+	size := m.Size()
+	dAtA = make([]byte, size)
+	n, err := m.MarshalToSizedBuffer(dAtA[:size])
+	if err != nil {
+		return nil, err
+	}
+	return dAtA[:n], nil
+}
+
+func (m *TemplateInstanceObject) MarshalTo(dAtA []byte) (int, error) {
+	size := m.Size()
+	return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *TemplateInstanceObject) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+	i := len(dAtA)
+	_ = i
+	var l int
+	_ = l
+	{
+		size, err := m.Ref.MarshalToSizedBuffer(dAtA[:i])
+		if err != nil {
+			return 0, err
+		}
+		i -= size
+		i = encodeVarintGenerated(dAtA, i, uint64(size))
+	}
+	i--
+	dAtA[i] = 0xa
+	return len(dAtA) - i, nil
+}
+
+func (m *TemplateInstanceRequester) Marshal() (dAtA []byte, err error) {
+	size := m.Size()
+	dAtA = make([]byte, size)
+	n, err := m.MarshalToSizedBuffer(dAtA[:size])
+	if err != nil {
+		return nil, err
+	}
+	return dAtA[:n], nil
+}
+
+func (m *TemplateInstanceRequester) MarshalTo(dAtA []byte) (int, error) {
+	size := m.Size()
+	return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *TemplateInstanceRequester) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+	i := len(dAtA)
+	_ = i
+	var l int
+	_ = l
+	if len(m.Extra) > 0 {
+		keysForExtra := make([]string, 0, len(m.Extra))
+		for k := range m.Extra {
+			keysForExtra = append(keysForExtra, string(k))
+		}
+		github_com_gogo_protobuf_sortkeys.Strings(keysForExtra)
+		for iNdEx := len(keysForExtra) - 1; iNdEx >= 0; iNdEx-- {
+			v := m.Extra[string(keysForExtra[iNdEx])]
+			baseI := i
+			{
+				size, err := (&v).MarshalToSizedBuffer(dAtA[:i])
+				if err != nil {
+					return 0, err
+				}
+				i -= size
+				i = encodeVarintGenerated(dAtA, i, uint64(size))
+			}
+			i--
+			dAtA[i] = 0x12
+			i -= len(keysForExtra[iNdEx])
+			copy(dAtA[i:], keysForExtra[iNdEx])
+			i = encodeVarintGenerated(dAtA, i, uint64(len(keysForExtra[iNdEx])))
+			i--
+			dAtA[i] = 0xa
+			i = encodeVarintGenerated(dAtA, i, uint64(baseI-i))
+			i--
+			dAtA[i] = 0x22
+		}
+	}
+	if len(m.Groups) > 0 {
+		for iNdEx := len(m.Groups) - 1; iNdEx >= 0; iNdEx-- {
+			i -= len(m.Groups[iNdEx])
+			copy(dAtA[i:], m.Groups[iNdEx])
+			i = encodeVarintGenerated(dAtA, i, uint64(len(m.Groups[iNdEx])))
+			i--
+			dAtA[i] = 0x1a
+		}
+	}
+	i -= len(m.UID)
+	copy(dAtA[i:], m.UID)
+	i = encodeVarintGenerated(dAtA, i, uint64(len(m.UID)))
+	i--
+	dAtA[i] = 0x12
+	i -= len(m.Username)
+	copy(dAtA[i:], m.Username)
+	i = encodeVarintGenerated(dAtA, i, uint64(len(m.Username)))
+	i--
+	dAtA[i] = 0xa
+	return len(dAtA) - i, nil
+}
+
+func (m *TemplateInstanceSpec) Marshal() (dAtA []byte, err error) {
+	size := m.Size()
+	dAtA = make([]byte, size)
+	n, err := m.MarshalToSizedBuffer(dAtA[:size])
+	if err != nil {
+		return nil, err
+	}
+	return dAtA[:n], nil
+}
+
+func (m *TemplateInstanceSpec) MarshalTo(dAtA []byte) (int, error) {
+	size := m.Size()
+	return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *TemplateInstanceSpec) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+	i := len(dAtA)
+	_ = i
+	var l int
+	_ = l
+	if m.Requester != nil {
+		{
+			size, err := m.Requester.MarshalToSizedBuffer(dAtA[:i])
+			if err != nil {
+				return 0, err
+			}
+			i -= size
+			i = encodeVarintGenerated(dAtA, i, uint64(size))
+		}
+		i--
+		dAtA[i] = 0x1a
+	}
+	if m.Secret != nil {
+		{
+			size, err := m.Secret.MarshalToSizedBuffer(dAtA[:i])
+			if err != nil {
+				return 0, err
+			}
+			i -= size
+			i = encodeVarintGenerated(dAtA, i, uint64(size))
+		}
+		i--
+		dAtA[i] = 0x12
+	}
+	{
+		size, err := m.Template.MarshalToSizedBuffer(dAtA[:i])
+		if err != nil {
+			return 0, err
+		}
+		i -= size
+		i = encodeVarintGenerated(dAtA, i, uint64(size))
+	}
+	i--
+	dAtA[i] = 0xa
+	return len(dAtA) - i, nil
+}
+
+func (m *TemplateInstanceStatus) Marshal() (dAtA []byte, err error) {
+	size := m.Size()
+	dAtA = make([]byte, size)
+	n, err := m.MarshalToSizedBuffer(dAtA[:size])
+	if err != nil {
+		return nil, err
+	}
+	return dAtA[:n], nil
+}
+
+func (m *TemplateInstanceStatus) MarshalTo(dAtA []byte) (int, error) {
+	size := m.Size()
+	return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *TemplateInstanceStatus) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+	i := len(dAtA)
+	_ = i
+	var l int
+	_ = l
+	if len(m.Objects) > 0 {
+		for iNdEx := len(m.Objects) - 1; iNdEx >= 0; iNdEx-- {
+			{
+				size, err := m.Objects[iNdEx].MarshalToSizedBuffer(dAtA[:i])
+				if err != nil {
+					return 0, err
+				}
+				i -= size
+				i = encodeVarintGenerated(dAtA, i, uint64(size))
+			}
+			i--
+			dAtA[i] = 0x12
+		}
+	}
+	if len(m.Conditions) > 0 {
+		for iNdEx := len(m.Conditions) - 1; iNdEx >= 0; iNdEx-- {
+			{
+				size, err := m.Conditions[iNdEx].MarshalToSizedBuffer(dAtA[:i])
+				if err != nil {
+					return 0, err
+				}
+				i -= size
+				i = encodeVarintGenerated(dAtA, i, uint64(size))
+			}
+			i--
+			dAtA[i] = 0xa
+		}
+	}
+	return len(dAtA) - i, nil
+}
+
+func (m *TemplateList) Marshal() (dAtA []byte, err error) {
+	size := m.Size()
+	dAtA = make([]byte, size)
+	n, err := m.MarshalToSizedBuffer(dAtA[:size])
+	if err != nil {
+		return nil, err
+	}
+	return dAtA[:n], nil
+}
+
+func (m *TemplateList) MarshalTo(dAtA []byte) (int, error) {
+	size := m.Size()
+	return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *TemplateList) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+	i := len(dAtA)
+	_ = i
+	var l int
+	_ = l
+	if len(m.Items) > 0 {
+		for iNdEx := len(m.Items) - 1; iNdEx >= 0; iNdEx-- {
+			{
+				size, err := m.Items[iNdEx].MarshalToSizedBuffer(dAtA[:i])
+				if err != nil {
+					return 0, err
+				}
+				i -= size
+				i = encodeVarintGenerated(dAtA, i, uint64(size))
+			}
+			i--
+			dAtA[i] = 0x12
+		}
+	}
+	{
+		size, err := m.ListMeta.MarshalToSizedBuffer(dAtA[:i])
+		if err != nil {
+			return 0, err
+		}
+		i -= size
+		i = encodeVarintGenerated(dAtA, i, uint64(size))
+	}
+	i--
+	dAtA[i] = 0xa
+	return len(dAtA) - i, nil
+}
+
+func encodeVarintGenerated(dAtA []byte, offset int, v uint64) int {
+	offset -= sovGenerated(v)
+	base := offset
+	for v >= 1<<7 {
+		dAtA[offset] = uint8(v&0x7f | 0x80)
+		v >>= 7
+		offset++
+	}
+	dAtA[offset] = uint8(v)
+	return base
+}
+func (m *BrokerTemplateInstance) Size() (n int) {
+	if m == nil {
+		return 0
+	}
+	var l int
+	_ = l
+	l = m.ObjectMeta.Size()
+	n += 1 + l + sovGenerated(uint64(l))
+	l = m.Spec.Size()
+	n += 1 + l + sovGenerated(uint64(l))
+	return n
+}
+
+func (m *BrokerTemplateInstanceList) Size() (n int) {
+	if m == nil {
+		return 0
+	}
+	var l int
+	_ = l
+	l = m.ListMeta.Size()
+	n += 1 + l + sovGenerated(uint64(l))
+	if len(m.Items) > 0 {
+		for _, e := range m.Items {
+			l = e.Size()
+			n += 1 + l + sovGenerated(uint64(l))
+		}
+	}
+	return n
+}
+
+func (m *BrokerTemplateInstanceSpec) Size() (n int) {
+	if m == nil {
+		return 0
+	}
+	var l int
+	_ = l
+	l = m.TemplateInstance.Size()
+	n += 1 + l + sovGenerated(uint64(l))
+	l = m.Secret.Size()
+	n += 1 + l + sovGenerated(uint64(l))
+	if len(m.BindingIDs) > 0 {
+		for _, s := range m.BindingIDs {
+			l = len(s)
+			n += 1 + l + sovGenerated(uint64(l))
+		}
+	}
+	return n
+}
+
+func (m ExtraValue) Size() (n int) {
+	if m == nil {
+		return 0
+	}
+	var l int
+	_ = l
+	if len(m) > 0 {
+		for _, s := range m {
+			l = len(s)
+			n += 1 + l + sovGenerated(uint64(l))
+		}
+	}
+	return n
+}
+
+func (m *Parameter) Size() (n int) {
+	if m == nil {
+		return 0
+	}
+	var l int
+	_ = l
+	l = len(m.Name)
+	n += 1 + l + sovGenerated(uint64(l))
+	l = len(m.DisplayName)
+	n += 1 + l + sovGenerated(uint64(l))
+	l = len(m.Description)
+	n += 1 + l + sovGenerated(uint64(l))
+	l = len(m.Value)
+	n += 1 + l + sovGenerated(uint64(l))
+	l = len(m.Generate)
+	n += 1 + l + sovGenerated(uint64(l))
+	l = len(m.From)
+	n += 1 + l + sovGenerated(uint64(l))
+	n += 2
+	return n
+}
+
+func (m *Template) Size() (n int) {
+	if m == nil {
+		return 0
+	}
+	var l int
+	_ = l
+	l = m.ObjectMeta.Size()
+	n += 1 + l + sovGenerated(uint64(l))
+	l = len(m.Message)
+	n += 1 + l + sovGenerated(uint64(l))
+	if len(m.Objects) > 0 {
+		for _, e := range m.Objects {
+			l = e.Size()
+			n += 1 + l + sovGenerated(uint64(l))
+		}
+	}
+	if len(m.Parameters) > 0 {
+		for _, e := range m.Parameters {
+			l = e.Size()
+			n += 1 + l + sovGenerated(uint64(l))
+		}
+	}
+	if len(m.ObjectLabels) > 0 {
+		for k, v := range m.ObjectLabels {
+			_ = k
+			_ = v
+			mapEntrySize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + len(v) + sovGenerated(uint64(len(v)))
+			n += mapEntrySize + 1 + sovGenerated(uint64(mapEntrySize))
+		}
+	}
+	return n
+}
+
+func (m *TemplateInstance) Size() (n int) {
+	if m == nil {
+		return 0
+	}
+	var l int
+	_ = l
+	l = m.ObjectMeta.Size()
+	n += 1 + l + sovGenerated(uint64(l))
+	l = m.Spec.Size()
+	n += 1 + l + sovGenerated(uint64(l))
+	l = m.Status.Size()
+	n += 1 + l + sovGenerated(uint64(l))
+	return n
+}
+
+func (m *TemplateInstanceCondition) Size() (n int) {
+	if m == nil {
+		return 0
+	}
+	var l int
+	_ = l
+	l = len(m.Type)
+	n += 1 + l + sovGenerated(uint64(l))
+	l = len(m.Status)
+	n += 1 + l + sovGenerated(uint64(l))
+	l = m.LastTransitionTime.Size()
+	n += 1 + l + sovGenerated(uint64(l))
+	l = len(m.Reason)
+	n += 1 + l + sovGenerated(uint64(l))
+	l = len(m.Message)
+	n += 1 + l + sovGenerated(uint64(l))
+	return n
+}
+
+func (m *TemplateInstanceList) Size() (n int) {
+	if m == nil {
+		return 0
+	}
+	var l int
+	_ = l
+	l = m.ListMeta.Size()
+	n += 1 + l + sovGenerated(uint64(l))
+	if len(m.Items) > 0 {
+		for _, e := range m.Items {
+			l = e.Size()
+			n += 1 + l + sovGenerated(uint64(l))
+		}
+	}
+	return n
+}
+
+func (m *TemplateInstanceObject) Size() (n int) {
+	if m == nil {
+		return 0
+	}
+	var l int
+	_ = l
+	l = m.Ref.Size()
+	n += 1 + l + sovGenerated(uint64(l))
+	return n
+}
+
+func (m *TemplateInstanceRequester) Size() (n int) {
+	if m == nil {
+		return 0
+	}
+	var l int
+	_ = l
+	l = len(m.Username)
+	n += 1 + l + sovGenerated(uint64(l))
+	l = len(m.UID)
+	n += 1 + l + sovGenerated(uint64(l))
+	if len(m.Groups) > 0 {
+		for _, s := range m.Groups {
+			l = len(s)
+			n += 1 + l + sovGenerated(uint64(l))
+		}
+	}
+	if len(m.Extra) > 0 {
+		for k, v := range m.Extra {
+			_ = k
+			_ = v
+			l = v.Size()
+			mapEntrySize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + l + sovGenerated(uint64(l))
+			n += mapEntrySize + 1 + sovGenerated(uint64(mapEntrySize))
+		}
+	}
+	return n
+}
+
+func (m *TemplateInstanceSpec) Size() (n int) {
+	if m == nil {
+		return 0
+	}
+	var l int
+	_ = l
+	l = m.Template.Size()
+	n += 1 + l + sovGenerated(uint64(l))
+	if m.Secret != nil {
+		l = m.Secret.Size()
+		n += 1 + l + sovGenerated(uint64(l))
+	}
+	if m.Requester != nil {
+		l = m.Requester.Size()
+		n += 1 + l + sovGenerated(uint64(l))
+	}
+	return n
+}
+
+func (m *TemplateInstanceStatus) Size() (n int) {
+	if m == nil {
+		return 0
+	}
+	var l int
+	_ = l
+	if len(m.Conditions) > 0 {
+		for _, e := range m.Conditions {
+			l = e.Size()
+			n += 1 + l + sovGenerated(uint64(l))
+		}
+	}
+	if len(m.Objects) > 0 {
+		for _, e := range m.Objects {
+			l = e.Size()
+			n += 1 + l + sovGenerated(uint64(l))
+		}
+	}
+	return n
+}
+
+func (m *TemplateList) Size() (n int) {
+	if m == nil {
+		return 0
+	}
+	var l int
+	_ = l
+	l = m.ListMeta.Size()
+	n += 1 + l + sovGenerated(uint64(l))
+	if len(m.Items) > 0 {
+		for _, e := range m.Items {
+			l = e.Size()
+			n += 1 + l + sovGenerated(uint64(l))
+		}
+	}
+	return n
+}
+
+func sovGenerated(x uint64) (n int) {
+	return (math_bits.Len64(x|1) + 6) / 7
+}
+func sozGenerated(x uint64) (n int) {
+	return sovGenerated(uint64((x << 1) ^ uint64((int64(x) >> 63))))
+}
+func (this *BrokerTemplateInstance) String() string {
+	if this == nil {
+		return "nil"
+	}
+	s := strings.Join([]string{`&BrokerTemplateInstance{`,
+		`ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v1.ObjectMeta", 1), `&`, ``, 1) + `,`,
+		`Spec:` + strings.Replace(strings.Replace(this.Spec.String(), "BrokerTemplateInstanceSpec", "BrokerTemplateInstanceSpec", 1), `&`, ``, 1) + `,`,
+		`}`,
+	}, "")
+	return s
+}
+func (this *BrokerTemplateInstanceList) String() string {
+	if this == nil {
+		return "nil"
+	}
+	repeatedStringForItems := "[]BrokerTemplateInstance{"
+	for _, f := range this.Items {
+		repeatedStringForItems += strings.Replace(strings.Replace(f.String(), "BrokerTemplateInstance", "BrokerTemplateInstance", 1), `&`, ``, 1) + ","
+	}
+	repeatedStringForItems += "}"
+	s := strings.Join([]string{`&BrokerTemplateInstanceList{`,
+		`ListMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ListMeta), "ListMeta", "v1.ListMeta", 1), `&`, ``, 1) + `,`,
+		`Items:` + repeatedStringForItems + `,`,
+		`}`,
+	}, "")
+	return s
+}
+func (this *BrokerTemplateInstanceSpec) String() string {
+	if this == nil {
+		return "nil"
+	}
+	s := strings.Join([]string{`&BrokerTemplateInstanceSpec{`,
+		`TemplateInstance:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.TemplateInstance), "ObjectReference", "v11.ObjectReference", 1), `&`, ``, 1) + `,`,
+		`Secret:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Secret), "ObjectReference", "v11.ObjectReference", 1), `&`, ``, 1) + `,`,
+		`BindingIDs:` + fmt.Sprintf("%v", this.BindingIDs) + `,`,
+		`}`,
+	}, "")
+	return s
+}
+func (this *Parameter) String() string {
+	if this == nil {
+		return "nil"
+	}
+	s := strings.Join([]string{`&Parameter{`,
+		`Name:` + fmt.Sprintf("%v", this.Name) + `,`,
+		`DisplayName:` + fmt.Sprintf("%v", this.DisplayName) + `,`,
+		`Description:` + fmt.Sprintf("%v", this.Description) + `,`,
+		`Value:` + fmt.Sprintf("%v", this.Value) + `,`,
+		`Generate:` + fmt.Sprintf("%v", this.Generate) + `,`,
+		`From:` + fmt.Sprintf("%v", this.From) + `,`,
+		`Required:` + fmt.Sprintf("%v", this.Required) + `,`,
+		`}`,
+	}, "")
+	return s
+}
+func (this *Template) String() string {
+	if this == nil {
+		return "nil"
+	}
+	repeatedStringForObjects := "[]RawExtension{"
+	for _, f := range this.Objects {
+		repeatedStringForObjects += fmt.Sprintf("%v", f) + ","
+	}
+	repeatedStringForObjects += "}"
+	repeatedStringForParameters := "[]Parameter{"
+	for _, f := range this.Parameters {
+		repeatedStringForParameters += strings.Replace(strings.Replace(f.String(), "Parameter", "Parameter", 1), `&`, ``, 1) + ","
+	}
+	repeatedStringForParameters += "}"
+	keysForObjectLabels := make([]string, 0, len(this.ObjectLabels))
+	for k := range this.ObjectLabels {
+		keysForObjectLabels = append(keysForObjectLabels, k)
+	}
+	github_com_gogo_protobuf_sortkeys.Strings(keysForObjectLabels)
+	mapStringForObjectLabels := "map[string]string{"
+	for _, k := range keysForObjectLabels {
+		mapStringForObjectLabels += fmt.Sprintf("%v: %v,", k, this.ObjectLabels[k])
+	}
+	mapStringForObjectLabels += "}"
+	s := strings.Join([]string{`&Template{`,
+		`ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v1.ObjectMeta", 1), `&`, ``, 1) + `,`,
+		`Message:` + fmt.Sprintf("%v", this.Message) + `,`,
+		`Objects:` + repeatedStringForObjects + `,`,
+		`Parameters:` + repeatedStringForParameters + `,`,
+		`ObjectLabels:` + mapStringForObjectLabels + `,`,
+		`}`,
+	}, "")
+	return s
+}
+func (this *TemplateInstance) String() string {
+	if this == nil {
+		return "nil"
+	}
+	s := strings.Join([]string{`&TemplateInstance{`,
+		`ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v1.ObjectMeta", 1), `&`, ``, 1) + `,`,
+		`Spec:` + strings.Replace(strings.Replace(this.Spec.String(), "TemplateInstanceSpec", "TemplateInstanceSpec", 1), `&`, ``, 1) + `,`,
+		`Status:` + strings.Replace(strings.Replace(this.Status.String(), "TemplateInstanceStatus", "TemplateInstanceStatus", 1), `&`, ``, 1) + `,`,
+		`}`,
+	}, "")
+	return s
+}
+func (this *TemplateInstanceCondition) String() string {
+	if this == nil {
+		return "nil"
+	}
+	s := strings.Join([]string{`&TemplateInstanceCondition{`,
+		`Type:` + fmt.Sprintf("%v", this.Type) + `,`,
+		`Status:` + fmt.Sprintf("%v", this.Status) + `,`,
+		`LastTransitionTime:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.LastTransitionTime), "Time", "v1.Time", 1), `&`, ``, 1) + `,`,
+		`Reason:` + fmt.Sprintf("%v", this.Reason) + `,`,
+		`Message:` + fmt.Sprintf("%v", this.Message) + `,`,
+		`}`,
+	}, "")
+	return s
+}
+func (this *TemplateInstanceList) String() string {
+	if this == nil {
+		return "nil"
+	}
+	repeatedStringForItems := "[]TemplateInstance{"
+	for _, f := range this.Items {
+		repeatedStringForItems += strings.Replace(strings.Replace(f.String(), "TemplateInstance", "TemplateInstance", 1), `&`, ``, 1) + ","
+	}
+	repeatedStringForItems += "}"
+	s := strings.Join([]string{`&TemplateInstanceList{`,
+		`ListMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ListMeta), "ListMeta", "v1.ListMeta", 1), `&`, ``, 1) + `,`,
+		`Items:` + repeatedStringForItems + `,`,
+		`}`,
+	}, "")
+	return s
+}
+func (this *TemplateInstanceObject) String() string {
+	if this == nil {
+		return "nil"
+	}
+	s := strings.Join([]string{`&TemplateInstanceObject{`,
+		`Ref:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Ref), "ObjectReference", "v11.ObjectReference", 1), `&`, ``, 1) + `,`,
+		`}`,
+	}, "")
+	return s
+}
+func (this *TemplateInstanceRequester) String() string {
+	if this == nil {
+		return "nil"
+	}
+	keysForExtra := make([]string, 0, len(this.Extra))
+	for k := range this.Extra {
+		keysForExtra = append(keysForExtra, k)
+	}
+	github_com_gogo_protobuf_sortkeys.Strings(keysForExtra)
+	mapStringForExtra := "map[string]ExtraValue{"
+	for _, k := range keysForExtra {
+		mapStringForExtra += fmt.Sprintf("%v: %v,", k, this.Extra[k])
+	}
+	mapStringForExtra += "}"
+	s := strings.Join([]string{`&TemplateInstanceRequester{`,
+		`Username:` + fmt.Sprintf("%v", this.Username) + `,`,
+		`UID:` + fmt.Sprintf("%v", this.UID) + `,`,
+		`Groups:` + fmt.Sprintf("%v", this.Groups) + `,`,
+		`Extra:` + mapStringForExtra + `,`,
+		`}`,
+	}, "")
+	return s
+}
+func (this *TemplateInstanceSpec) String() string {
+	if this == nil {
+		return "nil"
+	}
+	s := strings.Join([]string{`&TemplateInstanceSpec{`,
+		`Template:` + strings.Replace(strings.Replace(this.Template.String(), "Template", "Template", 1), `&`, ``, 1) + `,`,
+		`Secret:` + strings.Replace(fmt.Sprintf("%v", this.Secret), "LocalObjectReference", "v11.LocalObjectReference", 1) + `,`,
+		`Requester:` + strings.Replace(this.Requester.String(), "TemplateInstanceRequester", "TemplateInstanceRequester", 1) + `,`,
+		`}`,
+	}, "")
+	return s
+}
+func (this *TemplateInstanceStatus) String() string {
+	if this == nil {
+		return "nil"
+	}
+	repeatedStringForConditions := "[]TemplateInstanceCondition{"
+	for _, f := range this.Conditions {
+		repeatedStringForConditions += strings.Replace(strings.Replace(f.String(), "TemplateInstanceCondition", "TemplateInstanceCondition", 1), `&`, ``, 1) + ","
+	}
+	repeatedStringForConditions += "}"
+	repeatedStringForObjects := "[]TemplateInstanceObject{"
+	for _, f := range this.Objects {
+		repeatedStringForObjects += strings.Replace(strings.Replace(f.String(), "TemplateInstanceObject", "TemplateInstanceObject", 1), `&`, ``, 1) + ","
+	}
+	repeatedStringForObjects += "}"
+	s := strings.Join([]string{`&TemplateInstanceStatus{`,
+		`Conditions:` + repeatedStringForConditions + `,`,
+		`Objects:` + repeatedStringForObjects + `,`,
+		`}`,
+	}, "")
+	return s
+}
+func (this *TemplateList) String() string {
+	if this == nil {
+		return "nil"
+	}
+	repeatedStringForItems := "[]Template{"
+	for _, f := range this.Items {
+		repeatedStringForItems += strings.Replace(strings.Replace(f.String(), "Template", "Template", 1), `&`, ``, 1) + ","
+	}
+	repeatedStringForItems += "}"
+	s := strings.Join([]string{`&TemplateList{`,
+		`ListMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ListMeta), "ListMeta", "v1.ListMeta", 1), `&`, ``, 1) + `,`,
+		`Items:` + repeatedStringForItems + `,`,
+		`}`,
+	}, "")
+	return s
+}
+func valueToStringGenerated(v interface{}) string {
+	rv := reflect.ValueOf(v)
+	if rv.IsNil() {
+		return "nil"
+	}
+	pv := reflect.Indirect(rv).Interface()
+	return fmt.Sprintf("*%v", pv)
+}
+func (m *BrokerTemplateInstance) Unmarshal(dAtA []byte) error {
+	l := len(dAtA)
+	iNdEx := 0
+	for iNdEx < l {
+		preIndex := iNdEx
+		var wire uint64
+		for shift := uint(0); ; shift += 7 {
+			if shift >= 64 {
+				return ErrIntOverflowGenerated
+			}
+			if iNdEx >= l {
+				return io.ErrUnexpectedEOF
+			}
+			b := dAtA[iNdEx]
+			iNdEx++
+			wire |= uint64(b&0x7F) << shift
+			if b < 0x80 {
+				break
+			}
+		}
+		fieldNum := int32(wire >> 3)
+		wireType := int(wire & 0x7)
+		if wireType == 4 {
+			return fmt.Errorf("proto: BrokerTemplateInstance: wiretype end group for non-group")
+		}
+		if fieldNum <= 0 {
+			return fmt.Errorf("proto: BrokerTemplateInstance: illegal tag %d (wire type %d)", fieldNum, wire)
+		}
+		switch fieldNum {
+		case 1:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			if err := m.ObjectMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			iNdEx = postIndex
+		case 2:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Spec", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			if err := m.Spec.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			iNdEx = postIndex
+		default:
+			iNdEx = preIndex
+			skippy, err := skipGenerated(dAtA[iNdEx:])
+			if err != nil {
+				return err
+			}
+			if skippy < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if (iNdEx + skippy) < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if (iNdEx + skippy) > l {
+				return io.ErrUnexpectedEOF
+			}
+			iNdEx += skippy
+		}
+	}
+
+	if iNdEx > l {
+		return io.ErrUnexpectedEOF
+	}
+	return nil
+}
+func (m *BrokerTemplateInstanceList) Unmarshal(dAtA []byte) error {
+	l := len(dAtA)
+	iNdEx := 0
+	for iNdEx < l {
+		preIndex := iNdEx
+		var wire uint64
+		for shift := uint(0); ; shift += 7 {
+			if shift >= 64 {
+				return ErrIntOverflowGenerated
+			}
+			if iNdEx >= l {
+				return io.ErrUnexpectedEOF
+			}
+			b := dAtA[iNdEx]
+			iNdEx++
+			wire |= uint64(b&0x7F) << shift
+			if b < 0x80 {
+				break
+			}
+		}
+		fieldNum := int32(wire >> 3)
+		wireType := int(wire & 0x7)
+		if wireType == 4 {
+			return fmt.Errorf("proto: BrokerTemplateInstanceList: wiretype end group for non-group")
+		}
+		if fieldNum <= 0 {
+			return fmt.Errorf("proto: BrokerTemplateInstanceList: illegal tag %d (wire type %d)", fieldNum, wire)
+		}
+		switch fieldNum {
+		case 1:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field ListMeta", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			if err := m.ListMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			iNdEx = postIndex
+		case 2:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.Items = append(m.Items, BrokerTemplateInstance{})
+			if err := m.Items[len(m.Items)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			iNdEx = postIndex
+		default:
+			iNdEx = preIndex
+			skippy, err := skipGenerated(dAtA[iNdEx:])
+			if err != nil {
+				return err
+			}
+			if skippy < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if (iNdEx + skippy) < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if (iNdEx + skippy) > l {
+				return io.ErrUnexpectedEOF
+			}
+			iNdEx += skippy
+		}
+	}
+
+	if iNdEx > l {
+		return io.ErrUnexpectedEOF
+	}
+	return nil
+}
+func (m *BrokerTemplateInstanceSpec) Unmarshal(dAtA []byte) error {
+	l := len(dAtA)
+	iNdEx := 0
+	for iNdEx < l {
+		preIndex := iNdEx
+		var wire uint64
+		for shift := uint(0); ; shift += 7 {
+			if shift >= 64 {
+				return ErrIntOverflowGenerated
+			}
+			if iNdEx >= l {
+				return io.ErrUnexpectedEOF
+			}
+			b := dAtA[iNdEx]
+			iNdEx++
+			wire |= uint64(b&0x7F) << shift
+			if b < 0x80 {
+				break
+			}
+		}
+		fieldNum := int32(wire >> 3)
+		wireType := int(wire & 0x7)
+		if wireType == 4 {
+			return fmt.Errorf("proto: BrokerTemplateInstanceSpec: wiretype end group for non-group")
+		}
+		if fieldNum <= 0 {
+			return fmt.Errorf("proto: BrokerTemplateInstanceSpec: illegal tag %d (wire type %d)", fieldNum, wire)
+		}
+		switch fieldNum {
+		case 1:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field TemplateInstance", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			if err := m.TemplateInstance.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			iNdEx = postIndex
+		case 2:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Secret", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			if err := m.Secret.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			iNdEx = postIndex
+		case 3:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field BindingIDs", wireType)
+			}
+			var stringLen uint64
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				stringLen |= uint64(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			intStringLen := int(stringLen)
+			if intStringLen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + intStringLen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.BindingIDs = append(m.BindingIDs, string(dAtA[iNdEx:postIndex]))
+			iNdEx = postIndex
+		default:
+			iNdEx = preIndex
+			skippy, err := skipGenerated(dAtA[iNdEx:])
+			if err != nil {
+				return err
+			}
+			if skippy < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if (iNdEx + skippy) < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if (iNdEx + skippy) > l {
+				return io.ErrUnexpectedEOF
+			}
+			iNdEx += skippy
+		}
+	}
+
+	if iNdEx > l {
+		return io.ErrUnexpectedEOF
+	}
+	return nil
+}
+func (m *ExtraValue) Unmarshal(dAtA []byte) error {
+	l := len(dAtA)
+	iNdEx := 0
+	for iNdEx < l {
+		preIndex := iNdEx
+		var wire uint64
+		for shift := uint(0); ; shift += 7 {
+			if shift >= 64 {
+				return ErrIntOverflowGenerated
+			}
+			if iNdEx >= l {
+				return io.ErrUnexpectedEOF
+			}
+			b := dAtA[iNdEx]
+			iNdEx++
+			wire |= uint64(b&0x7F) << shift
+			if b < 0x80 {
+				break
+			}
+		}
+		fieldNum := int32(wire >> 3)
+		wireType := int(wire & 0x7)
+		if wireType == 4 {
+			return fmt.Errorf("proto: ExtraValue: wiretype end group for non-group")
+		}
+		if fieldNum <= 0 {
+			return fmt.Errorf("proto: ExtraValue: illegal tag %d (wire type %d)", fieldNum, wire)
+		}
+		switch fieldNum {
+		case 1:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType)
+			}
+			var stringLen uint64
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				stringLen |= uint64(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			intStringLen := int(stringLen)
+			if intStringLen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + intStringLen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			*m = append(*m, string(dAtA[iNdEx:postIndex]))
+			iNdEx = postIndex
+		default:
+			iNdEx = preIndex
+			skippy, err := skipGenerated(dAtA[iNdEx:])
+			if err != nil {
+				return err
+			}
+			if skippy < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if (iNdEx + skippy) < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if (iNdEx + skippy) > l {
+				return io.ErrUnexpectedEOF
+			}
+			iNdEx += skippy
+		}
+	}
+
+	if iNdEx > l {
+		return io.ErrUnexpectedEOF
+	}
+	return nil
+}
+func (m *Parameter) Unmarshal(dAtA []byte) error {
+	l := len(dAtA)
+	iNdEx := 0
+	for iNdEx < l {
+		preIndex := iNdEx
+		var wire uint64
+		for shift := uint(0); ; shift += 7 {
+			if shift >= 64 {
+				return ErrIntOverflowGenerated
+			}
+			if iNdEx >= l {
+				return io.ErrUnexpectedEOF
+			}
+			b := dAtA[iNdEx]
+			iNdEx++
+			wire |= uint64(b&0x7F) << shift
+			if b < 0x80 {
+				break
+			}
+		}
+		fieldNum := int32(wire >> 3)
+		wireType := int(wire & 0x7)
+		if wireType == 4 {
+			return fmt.Errorf("proto: Parameter: wiretype end group for non-group")
+		}
+		if fieldNum <= 0 {
+			return fmt.Errorf("proto: Parameter: illegal tag %d (wire type %d)", fieldNum, wire)
+		}
+		switch fieldNum {
+		case 1:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType)
+			}
+			var stringLen uint64
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				stringLen |= uint64(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			intStringLen := int(stringLen)
+			if intStringLen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + intStringLen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.Name = string(dAtA[iNdEx:postIndex])
+			iNdEx = postIndex
+		case 2:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field DisplayName", wireType)
+			}
+			var stringLen uint64
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				stringLen |= uint64(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			intStringLen := int(stringLen)
+			if intStringLen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + intStringLen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.DisplayName = string(dAtA[iNdEx:postIndex])
+			iNdEx = postIndex
+		case 3:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Description", wireType)
+			}
+			var stringLen uint64
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				stringLen |= uint64(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			intStringLen := int(stringLen)
+			if intStringLen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + intStringLen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.Description = string(dAtA[iNdEx:postIndex])
+			iNdEx = postIndex
+		case 4:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Value", wireType)
+			}
+			var stringLen uint64
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				stringLen |= uint64(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			intStringLen := int(stringLen)
+			if intStringLen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + intStringLen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.Value = string(dAtA[iNdEx:postIndex])
+			iNdEx = postIndex
+		case 5:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Generate", wireType)
+			}
+			var stringLen uint64
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				stringLen |= uint64(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			intStringLen := int(stringLen)
+			if intStringLen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + intStringLen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.Generate = string(dAtA[iNdEx:postIndex])
+			iNdEx = postIndex
+		case 6:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field From", wireType)
+			}
+			var stringLen uint64
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				stringLen |= uint64(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			intStringLen := int(stringLen)
+			if intStringLen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + intStringLen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.From = string(dAtA[iNdEx:postIndex])
+			iNdEx = postIndex
+		case 7:
+			if wireType != 0 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Required", wireType)
+			}
+			var v int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				v |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			m.Required = bool(v != 0)
+		default:
+			iNdEx = preIndex
+			skippy, err := skipGenerated(dAtA[iNdEx:])
+			if err != nil {
+				return err
+			}
+			if skippy < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if (iNdEx + skippy) < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if (iNdEx + skippy) > l {
+				return io.ErrUnexpectedEOF
+			}
+			iNdEx += skippy
+		}
+	}
+
+	if iNdEx > l {
+		return io.ErrUnexpectedEOF
+	}
+	return nil
+}
+func (m *Template) Unmarshal(dAtA []byte) error {
+	l := len(dAtA)
+	iNdEx := 0
+	for iNdEx < l {
+		preIndex := iNdEx
+		var wire uint64
+		for shift := uint(0); ; shift += 7 {
+			if shift >= 64 {
+				return ErrIntOverflowGenerated
+			}
+			if iNdEx >= l {
+				return io.ErrUnexpectedEOF
+			}
+			b := dAtA[iNdEx]
+			iNdEx++
+			wire |= uint64(b&0x7F) << shift
+			if b < 0x80 {
+				break
+			}
+		}
+		fieldNum := int32(wire >> 3)
+		wireType := int(wire & 0x7)
+		if wireType == 4 {
+			return fmt.Errorf("proto: Template: wiretype end group for non-group")
+		}
+		if fieldNum <= 0 {
+			return fmt.Errorf("proto: Template: illegal tag %d (wire type %d)", fieldNum, wire)
+		}
+		switch fieldNum {
+		case 1:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			if err := m.ObjectMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			iNdEx = postIndex
+		case 2:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Message", wireType)
+			}
+			var stringLen uint64
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				stringLen |= uint64(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			intStringLen := int(stringLen)
+			if intStringLen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + intStringLen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.Message = string(dAtA[iNdEx:postIndex])
+			iNdEx = postIndex
+		case 3:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Objects", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.Objects = append(m.Objects, runtime.RawExtension{})
+			if err := m.Objects[len(m.Objects)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			iNdEx = postIndex
+		case 4:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Parameters", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.Parameters = append(m.Parameters, Parameter{})
+			if err := m.Parameters[len(m.Parameters)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			iNdEx = postIndex
+		case 5:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field ObjectLabels", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			if m.ObjectLabels == nil {
+				m.ObjectLabels = make(map[string]string)
+			}
+			var mapkey string
+			var mapvalue string
+			for iNdEx < postIndex {
+				entryPreIndex := iNdEx
+				var wire uint64
+				for shift := uint(0); ; shift += 7 {
+					if shift >= 64 {
+						return ErrIntOverflowGenerated
+					}
+					if iNdEx >= l {
+						return io.ErrUnexpectedEOF
+					}
+					b := dAtA[iNdEx]
+					iNdEx++
+					wire |= uint64(b&0x7F) << shift
+					if b < 0x80 {
+						break
+					}
+				}
+				fieldNum := int32(wire >> 3)
+				if fieldNum == 1 {
+					var stringLenmapkey uint64
+					for shift := uint(0); ; shift += 7 {
+						if shift >= 64 {
+							return ErrIntOverflowGenerated
+						}
+						if iNdEx >= l {
+							return io.ErrUnexpectedEOF
+						}
+						b := dAtA[iNdEx]
+						iNdEx++
+						stringLenmapkey |= uint64(b&0x7F) << shift
+						if b < 0x80 {
+							break
+						}
+					}
+					intStringLenmapkey := int(stringLenmapkey)
+					if intStringLenmapkey < 0 {
+						return ErrInvalidLengthGenerated
+					}
+					postStringIndexmapkey := iNdEx + intStringLenmapkey
+					if postStringIndexmapkey < 0 {
+						return ErrInvalidLengthGenerated
+					}
+					if postStringIndexmapkey > l {
+						return io.ErrUnexpectedEOF
+					}
+					mapkey = string(dAtA[iNdEx:postStringIndexmapkey])
+					iNdEx = postStringIndexmapkey
+				} else if fieldNum == 2 {
+					var stringLenmapvalue uint64
+					for shift := uint(0); ; shift += 7 {
+						if shift >= 64 {
+							return ErrIntOverflowGenerated
+						}
+						if iNdEx >= l {
+							return io.ErrUnexpectedEOF
+						}
+						b := dAtA[iNdEx]
+						iNdEx++
+						stringLenmapvalue |= uint64(b&0x7F) << shift
+						if b < 0x80 {
+							break
+						}
+					}
+					intStringLenmapvalue := int(stringLenmapvalue)
+					if intStringLenmapvalue < 0 {
+						return ErrInvalidLengthGenerated
+					}
+					postStringIndexmapvalue := iNdEx + intStringLenmapvalue
+					if postStringIndexmapvalue < 0 {
+						return ErrInvalidLengthGenerated
+					}
+					if postStringIndexmapvalue > l {
+						return io.ErrUnexpectedEOF
+					}
+					mapvalue = string(dAtA[iNdEx:postStringIndexmapvalue])
+					iNdEx = postStringIndexmapvalue
+				} else {
+					iNdEx = entryPreIndex
+					skippy, err := skipGenerated(dAtA[iNdEx:])
+					if err != nil {
+						return err
+					}
+					if skippy < 0 {
+						return ErrInvalidLengthGenerated
+					}
+					if (iNdEx + skippy) > postIndex {
+						return io.ErrUnexpectedEOF
+					}
+					iNdEx += skippy
+				}
+			}
+			m.ObjectLabels[mapkey] = mapvalue
+			iNdEx = postIndex
+		default:
+			iNdEx = preIndex
+			skippy, err := skipGenerated(dAtA[iNdEx:])
+			if err != nil {
+				return err
+			}
+			if skippy < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if (iNdEx + skippy) < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if (iNdEx + skippy) > l {
+				return io.ErrUnexpectedEOF
+			}
+			iNdEx += skippy
+		}
+	}
+
+	if iNdEx > l {
+		return io.ErrUnexpectedEOF
+	}
+	return nil
+}
+func (m *TemplateInstance) Unmarshal(dAtA []byte) error {
+	l := len(dAtA)
+	iNdEx := 0
+	for iNdEx < l {
+		preIndex := iNdEx
+		var wire uint64
+		for shift := uint(0); ; shift += 7 {
+			if shift >= 64 {
+				return ErrIntOverflowGenerated
+			}
+			if iNdEx >= l {
+				return io.ErrUnexpectedEOF
+			}
+			b := dAtA[iNdEx]
+			iNdEx++
+			wire |= uint64(b&0x7F) << shift
+			if b < 0x80 {
+				break
+			}
+		}
+		fieldNum := int32(wire >> 3)
+		wireType := int(wire & 0x7)
+		if wireType == 4 {
+			return fmt.Errorf("proto: TemplateInstance: wiretype end group for non-group")
+		}
+		if fieldNum <= 0 {
+			return fmt.Errorf("proto: TemplateInstance: illegal tag %d (wire type %d)", fieldNum, wire)
+		}
+		switch fieldNum {
+		case 1:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			if err := m.ObjectMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			iNdEx = postIndex
+		case 2:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Spec", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			if err := m.Spec.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			iNdEx = postIndex
+		case 3:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Status", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			if err := m.Status.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			iNdEx = postIndex
+		default:
+			iNdEx = preIndex
+			skippy, err := skipGenerated(dAtA[iNdEx:])
+			if err != nil {
+				return err
+			}
+			if skippy < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if (iNdEx + skippy) < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if (iNdEx + skippy) > l {
+				return io.ErrUnexpectedEOF
+			}
+			iNdEx += skippy
+		}
+	}
+
+	if iNdEx > l {
+		return io.ErrUnexpectedEOF
+	}
+	return nil
+}
+func (m *TemplateInstanceCondition) Unmarshal(dAtA []byte) error {
+	l := len(dAtA)
+	iNdEx := 0
+	for iNdEx < l {
+		preIndex := iNdEx
+		var wire uint64
+		for shift := uint(0); ; shift += 7 {
+			if shift >= 64 {
+				return ErrIntOverflowGenerated
+			}
+			if iNdEx >= l {
+				return io.ErrUnexpectedEOF
+			}
+			b := dAtA[iNdEx]
+			iNdEx++
+			wire |= uint64(b&0x7F) << shift
+			if b < 0x80 {
+				break
+			}
+		}
+		fieldNum := int32(wire >> 3)
+		wireType := int(wire & 0x7)
+		if wireType == 4 {
+			return fmt.Errorf("proto: TemplateInstanceCondition: wiretype end group for non-group")
+		}
+		if fieldNum <= 0 {
+			return fmt.Errorf("proto: TemplateInstanceCondition: illegal tag %d (wire type %d)", fieldNum, wire)
+		}
+		switch fieldNum {
+		case 1:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Type", wireType)
+			}
+			var stringLen uint64
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				stringLen |= uint64(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			intStringLen := int(stringLen)
+			if intStringLen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + intStringLen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.Type = TemplateInstanceConditionType(dAtA[iNdEx:postIndex])
+			iNdEx = postIndex
+		case 2:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Status", wireType)
+			}
+			var stringLen uint64
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				stringLen |= uint64(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			intStringLen := int(stringLen)
+			if intStringLen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + intStringLen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.Status = k8s_io_api_core_v1.ConditionStatus(dAtA[iNdEx:postIndex])
+			iNdEx = postIndex
+		case 3:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field LastTransitionTime", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			if err := m.LastTransitionTime.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			iNdEx = postIndex
+		case 4:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Reason", wireType)
+			}
+			var stringLen uint64
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				stringLen |= uint64(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			intStringLen := int(stringLen)
+			if intStringLen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + intStringLen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.Reason = string(dAtA[iNdEx:postIndex])
+			iNdEx = postIndex
+		case 5:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Message", wireType)
+			}
+			var stringLen uint64
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				stringLen |= uint64(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			intStringLen := int(stringLen)
+			if intStringLen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + intStringLen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.Message = string(dAtA[iNdEx:postIndex])
+			iNdEx = postIndex
+		default:
+			iNdEx = preIndex
+			skippy, err := skipGenerated(dAtA[iNdEx:])
+			if err != nil {
+				return err
+			}
+			if skippy < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if (iNdEx + skippy) < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if (iNdEx + skippy) > l {
+				return io.ErrUnexpectedEOF
+			}
+			iNdEx += skippy
+		}
+	}
+
+	if iNdEx > l {
+		return io.ErrUnexpectedEOF
+	}
+	return nil
+}
+func (m *TemplateInstanceList) Unmarshal(dAtA []byte) error {
+	l := len(dAtA)
+	iNdEx := 0
+	for iNdEx < l {
+		preIndex := iNdEx
+		var wire uint64
+		for shift := uint(0); ; shift += 7 {
+			if shift >= 64 {
+				return ErrIntOverflowGenerated
+			}
+			if iNdEx >= l {
+				return io.ErrUnexpectedEOF
+			}
+			b := dAtA[iNdEx]
+			iNdEx++
+			wire |= uint64(b&0x7F) << shift
+			if b < 0x80 {
+				break
+			}
+		}
+		fieldNum := int32(wire >> 3)
+		wireType := int(wire & 0x7)
+		if wireType == 4 {
+			return fmt.Errorf("proto: TemplateInstanceList: wiretype end group for non-group")
+		}
+		if fieldNum <= 0 {
+			return fmt.Errorf("proto: TemplateInstanceList: illegal tag %d (wire type %d)", fieldNum, wire)
+		}
+		switch fieldNum {
+		case 1:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field ListMeta", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			if err := m.ListMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			iNdEx = postIndex
+		case 2:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.Items = append(m.Items, TemplateInstance{})
+			if err := m.Items[len(m.Items)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			iNdEx = postIndex
+		default:
+			iNdEx = preIndex
+			skippy, err := skipGenerated(dAtA[iNdEx:])
+			if err != nil {
+				return err
+			}
+			if skippy < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if (iNdEx + skippy) < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if (iNdEx + skippy) > l {
+				return io.ErrUnexpectedEOF
+			}
+			iNdEx += skippy
+		}
+	}
+
+	if iNdEx > l {
+		return io.ErrUnexpectedEOF
+	}
+	return nil
+}
+func (m *TemplateInstanceObject) Unmarshal(dAtA []byte) error {
+	l := len(dAtA)
+	iNdEx := 0
+	for iNdEx < l {
+		preIndex := iNdEx
+		var wire uint64
+		for shift := uint(0); ; shift += 7 {
+			if shift >= 64 {
+				return ErrIntOverflowGenerated
+			}
+			if iNdEx >= l {
+				return io.ErrUnexpectedEOF
+			}
+			b := dAtA[iNdEx]
+			iNdEx++
+			wire |= uint64(b&0x7F) << shift
+			if b < 0x80 {
+				break
+			}
+		}
+		fieldNum := int32(wire >> 3)
+		wireType := int(wire & 0x7)
+		if wireType == 4 {
+			return fmt.Errorf("proto: TemplateInstanceObject: wiretype end group for non-group")
+		}
+		if fieldNum <= 0 {
+			return fmt.Errorf("proto: TemplateInstanceObject: illegal tag %d (wire type %d)", fieldNum, wire)
+		}
+		switch fieldNum {
+		case 1:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Ref", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			if err := m.Ref.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			iNdEx = postIndex
+		default:
+			iNdEx = preIndex
+			skippy, err := skipGenerated(dAtA[iNdEx:])
+			if err != nil {
+				return err
+			}
+			if skippy < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if (iNdEx + skippy) < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if (iNdEx + skippy) > l {
+				return io.ErrUnexpectedEOF
+			}
+			iNdEx += skippy
+		}
+	}
+
+	if iNdEx > l {
+		return io.ErrUnexpectedEOF
+	}
+	return nil
+}
+func (m *TemplateInstanceRequester) Unmarshal(dAtA []byte) error {
+	l := len(dAtA)
+	iNdEx := 0
+	for iNdEx < l {
+		preIndex := iNdEx
+		var wire uint64
+		for shift := uint(0); ; shift += 7 {
+			if shift >= 64 {
+				return ErrIntOverflowGenerated
+			}
+			if iNdEx >= l {
+				return io.ErrUnexpectedEOF
+			}
+			b := dAtA[iNdEx]
+			iNdEx++
+			wire |= uint64(b&0x7F) << shift
+			if b < 0x80 {
+				break
+			}
+		}
+		fieldNum := int32(wire >> 3)
+		wireType := int(wire & 0x7)
+		if wireType == 4 {
+			return fmt.Errorf("proto: TemplateInstanceRequester: wiretype end group for non-group")
+		}
+		if fieldNum <= 0 {
+			return fmt.Errorf("proto: TemplateInstanceRequester: illegal tag %d (wire type %d)", fieldNum, wire)
+		}
+		switch fieldNum {
+		case 1:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Username", wireType)
+			}
+			var stringLen uint64
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				stringLen |= uint64(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			intStringLen := int(stringLen)
+			if intStringLen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + intStringLen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.Username = string(dAtA[iNdEx:postIndex])
+			iNdEx = postIndex
+		case 2:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field UID", wireType)
+			}
+			var stringLen uint64
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				stringLen |= uint64(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			intStringLen := int(stringLen)
+			if intStringLen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + intStringLen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.UID = string(dAtA[iNdEx:postIndex])
+			iNdEx = postIndex
+		case 3:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Groups", wireType)
+			}
+			var stringLen uint64
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				stringLen |= uint64(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			intStringLen := int(stringLen)
+			if intStringLen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + intStringLen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.Groups = append(m.Groups, string(dAtA[iNdEx:postIndex]))
+			iNdEx = postIndex
+		case 4:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Extra", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			if m.Extra == nil {
+				m.Extra = make(map[string]ExtraValue)
+			}
+			var mapkey string
+			mapvalue := &ExtraValue{}
+			for iNdEx < postIndex {
+				entryPreIndex := iNdEx
+				var wire uint64
+				for shift := uint(0); ; shift += 7 {
+					if shift >= 64 {
+						return ErrIntOverflowGenerated
+					}
+					if iNdEx >= l {
+						return io.ErrUnexpectedEOF
+					}
+					b := dAtA[iNdEx]
+					iNdEx++
+					wire |= uint64(b&0x7F) << shift
+					if b < 0x80 {
+						break
+					}
+				}
+				fieldNum := int32(wire >> 3)
+				if fieldNum == 1 {
+					var stringLenmapkey uint64
+					for shift := uint(0); ; shift += 7 {
+						if shift >= 64 {
+							return ErrIntOverflowGenerated
+						}
+						if iNdEx >= l {
+							return io.ErrUnexpectedEOF
+						}
+						b := dAtA[iNdEx]
+						iNdEx++
+						stringLenmapkey |= uint64(b&0x7F) << shift
+						if b < 0x80 {
+							break
+						}
+					}
+					intStringLenmapkey := int(stringLenmapkey)
+					if intStringLenmapkey < 0 {
+						return ErrInvalidLengthGenerated
+					}
+					postStringIndexmapkey := iNdEx + intStringLenmapkey
+					if postStringIndexmapkey < 0 {
+						return ErrInvalidLengthGenerated
+					}
+					if postStringIndexmapkey > l {
+						return io.ErrUnexpectedEOF
+					}
+					mapkey = string(dAtA[iNdEx:postStringIndexmapkey])
+					iNdEx = postStringIndexmapkey
+				} else if fieldNum == 2 {
+					var mapmsglen int
+					for shift := uint(0); ; shift += 7 {
+						if shift >= 64 {
+							return ErrIntOverflowGenerated
+						}
+						if iNdEx >= l {
+							return io.ErrUnexpectedEOF
+						}
+						b := dAtA[iNdEx]
+						iNdEx++
+						mapmsglen |= int(b&0x7F) << shift
+						if b < 0x80 {
+							break
+						}
} + if mapmsglen < 0 { + return ErrInvalidLengthGenerated + } + postmsgIndex := iNdEx + mapmsglen + if postmsgIndex < 0 { + return ErrInvalidLengthGenerated + } + if postmsgIndex > l { + return io.ErrUnexpectedEOF + } + mapvalue = &ExtraValue{} + if err := mapvalue.Unmarshal(dAtA[iNdEx:postmsgIndex]); err != nil { + return err + } + iNdEx = postmsgIndex + } else { + iNdEx = entryPreIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > postIndex { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + m.Extra[mapkey] = *mapvalue + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *TemplateInstanceSpec) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: TemplateInstanceSpec: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: TemplateInstanceSpec: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Template", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Template.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Secret", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Secret == nil { + m.Secret = &v11.LocalObjectReference{} + } + if err := m.Secret.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Requester", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + 
} + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Requester == nil { + m.Requester = &TemplateInstanceRequester{} + } + if err := m.Requester.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *TemplateInstanceStatus) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: TemplateInstanceStatus: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: TemplateInstanceStatus: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Conditions", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Conditions = append(m.Conditions, TemplateInstanceCondition{}) + if err := m.Conditions[len(m.Conditions)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Objects", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Objects = append(m.Objects, TemplateInstanceObject{}) + if err := m.Objects[len(m.Objects)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *TemplateList) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift 
:= uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: TemplateList: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: TemplateList: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ListMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ListMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Items = append(m.Items, Template{}) + if err := m.Items[len(m.Items)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func skipGenerated(dAtA []byte) (n int, err error) { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowGenerated + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + wireType := int(wire & 0x7) + switch wireType { + case 0: + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowGenerated + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + iNdEx++ + if dAtA[iNdEx-1] < 0x80 { + break + } + } + return iNdEx, nil + case 1: + iNdEx += 8 + return iNdEx, nil + case 2: + var length int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowGenerated + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + length |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if length < 0 { + return 0, ErrInvalidLengthGenerated + } + iNdEx += length + if iNdEx < 0 { + return 0, ErrInvalidLengthGenerated + } + return iNdEx, nil + case 3: + for { + var innerWire uint64 + var start int = iNdEx + for shift := uint(0); ; shift 
+= 7 { + if shift >= 64 { + return 0, ErrIntOverflowGenerated + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + innerWire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + innerWireType := int(innerWire & 0x7) + if innerWireType == 4 { + break + } + next, err := skipGenerated(dAtA[start:]) + if err != nil { + return 0, err + } + iNdEx = start + next + if iNdEx < 0 { + return 0, ErrInvalidLengthGenerated + } + } + return iNdEx, nil + case 4: + return iNdEx, nil + case 5: + iNdEx += 4 + return iNdEx, nil + default: + return 0, fmt.Errorf("proto: illegal wireType %d", wireType) + } + } + panic("unreachable") +} + +var ( + ErrInvalidLengthGenerated = fmt.Errorf("proto: negative length found during unmarshaling") + ErrIntOverflowGenerated = fmt.Errorf("proto: integer overflow") +) diff --git a/vendor/github.com/openshift/api/template/v1/generated.proto b/vendor/github.com/openshift/api/template/v1/generated.proto new file mode 100644 index 0000000000000000000000000000000000000000..5c017e228f03f2ba95a56852bf78a5de4a39a76d --- /dev/null +++ b/vendor/github.com/openshift/api/template/v1/generated.proto @@ -0,0 +1,232 @@ + +// This file was autogenerated by go-to-protobuf. Do not edit it manually! + +syntax = 'proto2'; + +package github.com.openshift.api.template.v1; + +import "k8s.io/api/core/v1/generated.proto"; +import "k8s.io/apimachinery/pkg/apis/meta/v1/generated.proto"; +import "k8s.io/apimachinery/pkg/runtime/generated.proto"; +import "k8s.io/apimachinery/pkg/runtime/schema/generated.proto"; + +// Package-wide variables from generator "generated". +option go_package = "v1"; + +// BrokerTemplateInstance holds the service broker-related state associated with +// a TemplateInstance. BrokerTemplateInstance is part of an experimental API. +message BrokerTemplateInstance { + optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; + + // spec describes the state of this BrokerTemplateInstance. + optional BrokerTemplateInstanceSpec spec = 2; +} + +// BrokerTemplateInstanceList is a list of BrokerTemplateInstance objects. +message BrokerTemplateInstanceList { + optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; + + // items is a list of BrokerTemplateInstances + repeated BrokerTemplateInstance items = 2; +} + +// BrokerTemplateInstanceSpec describes the state of a BrokerTemplateInstance. +message BrokerTemplateInstanceSpec { + // templateinstance is a reference to a TemplateInstance object residing + // in a namespace. + optional k8s.io.api.core.v1.ObjectReference templateInstance = 1; + + // secret is a reference to a Secret object residing in a namespace, + // containing the necessary template parameters. + optional k8s.io.api.core.v1.ObjectReference secret = 2; + + // bindingids is a list of 'binding_id's provided during successive bind + // calls to the template service broker. + repeated string bindingIDs = 3; +} + +// ExtraValue masks the value so protobuf can generate +// +protobuf.nullable=true +// +protobuf.options.(gogoproto.goproto_stringer)=false +message ExtraValue { + // items, if empty, will result in an empty slice + + repeated string items = 1; +} + +// Parameter defines a name/value variable that is to be processed during +// the Template to Config transformation. +message Parameter { + // Name must be set and it can be referenced in Template + // Items using ${PARAMETER_NAME}. Required. 
+ optional string name = 1; + + // Optional: The name that will show in UI instead of parameter 'Name' + optional string displayName = 2; + + // Description of a parameter. Optional. + optional string description = 3; + + // Value holds the Parameter data. If specified, the generator will be + // ignored. The value replaces all occurrences of the Parameter ${Name} + // expression during the Template to Config transformation. Optional. + optional string value = 4; + + // generate specifies the generator to be used to generate random string + // from an input value specified by From field. The result string is + // stored into Value field. If empty, no generator is being used, leaving + // the result Value untouched. Optional. + // + // The only supported generator is "expression", which accepts a "from" + // value in the form of a simple regular expression containing the + // range expression "[a-zA-Z0-9]", and the length expression "a{length}". + // + // Examples: + // + // from | value + // ----------------------------- + // "test[0-9]{1}x" | "test7x" + // "[0-1]{8}" | "01001100" + // "0x[A-F0-9]{4}" | "0xB3AF" + // "[a-zA-Z0-9]{8}" | "hW4yQU5i" + optional string generate = 5; + + // From is an input value for the generator. Optional. + optional string from = 6; + + // Optional: Indicates the parameter must have a value. Defaults to false. + optional bool required = 7; +} + +// Template contains the inputs needed to produce a Config. +message Template { + optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; + + // message is an optional instructional message that will + // be displayed when this template is instantiated. + // This field should inform the user how to utilize the newly created resources. + // Parameter substitution will be performed on the message before being + // displayed so that generated credentials and other parameters can be + // included in the output. + optional string message = 2; + + // objects is an array of resources to include in this template. + // If a namespace value is hardcoded in the object, it will be removed + // during template instantiation, however if the namespace value + // is, or contains, a ${PARAMETER_REFERENCE}, the resolved + // value after parameter substitution will be respected and the object + // will be created in that namespace. + // +kubebuilder:pruning:PreserveUnknownFields + repeated k8s.io.apimachinery.pkg.runtime.RawExtension objects = 3; + + // parameters is an optional array of Parameters used during the + // Template to Config transformation. + repeated Parameter parameters = 4; + + // labels is a optional set of labels that are applied to every + // object during the Template to Config transformation. + map<string, string> labels = 5; +} + +// TemplateInstance requests and records the instantiation of a Template. +// TemplateInstance is part of an experimental API. +message TemplateInstance { + optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; + + // spec describes the desired state of this TemplateInstance. + optional TemplateInstanceSpec spec = 2; + + // status describes the current state of this TemplateInstance. + // +optional + optional TemplateInstanceStatus status = 3; +} + +// TemplateInstanceCondition contains condition information for a +// TemplateInstance. +message TemplateInstanceCondition { + // Type of the condition, currently Ready or InstantiateFailure. + optional string type = 1; + + // Status of the condition, one of True, False or Unknown. 
+ optional string status = 2; + + // LastTransitionTime is the last time a condition status transitioned from + // one state to another. + optional k8s.io.apimachinery.pkg.apis.meta.v1.Time lastTransitionTime = 3; + + // Reason is a brief machine readable explanation for the condition's last + // transition. + optional string reason = 4; + + // Message is a human readable description of the details of the last + // transition, complementing reason. + optional string message = 5; +} + +// TemplateInstanceList is a list of TemplateInstance objects. +message TemplateInstanceList { + optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; + + // items is a list of Templateinstances + repeated TemplateInstance items = 2; +} + +// TemplateInstanceObject references an object created by a TemplateInstance. +message TemplateInstanceObject { + // ref is a reference to the created object. When used under .spec, only + // name and namespace are used; these can contain references to parameters + // which will be substituted following the usual rules. + optional k8s.io.api.core.v1.ObjectReference ref = 1; +} + +// TemplateInstanceRequester holds the identity of an agent requesting a +// template instantiation. +message TemplateInstanceRequester { + // username uniquely identifies this user among all active users. + optional string username = 1; + + // uid is a unique value that identifies this user across time; if this user is + // deleted and another user by the same name is added, they will have + // different UIDs. + optional string uid = 2; + + // groups represent the groups this user is a part of. + repeated string groups = 3; + + // extra holds additional information provided by the authenticator. + map<string, ExtraValue> extra = 4; +} + +// TemplateInstanceSpec describes the desired state of a TemplateInstance. +message TemplateInstanceSpec { + // template is a full copy of the template for instantiation. + optional Template template = 1; + + // secret is a reference to a Secret object containing the necessary + // template parameters. + optional k8s.io.api.core.v1.LocalObjectReference secret = 2; + + // requester holds the identity of the agent requesting the template + // instantiation. + // +optional + optional TemplateInstanceRequester requester = 3; +} + +// TemplateInstanceStatus describes the current state of a TemplateInstance. +message TemplateInstanceStatus { + // conditions represent the latest available observations of a + // TemplateInstance's current state. + repeated TemplateInstanceCondition conditions = 1; + + // Objects references the objects created by the TemplateInstance. + repeated TemplateInstanceObject objects = 2; +} + +// TemplateList is a list of Template objects. 
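The condition messages above are what clients poll to track an instantiation. A small consumer-side sketch, assuming only this package's TemplateInstance types (defined later in this diff) and k8s.io/api/core/v1, of checking the Ready condition the way controllers typically scan condition lists:

package main

import (
	"fmt"

	templatev1 "github.com/openshift/api/template/v1"
	corev1 "k8s.io/api/core/v1"
)

// isReady reports whether the Ready condition is present and True.
func isReady(ti *templatev1.TemplateInstance) bool {
	for _, c := range ti.Status.Conditions {
		if c.Type == templatev1.TemplateInstanceReady {
			return c.Status == corev1.ConditionTrue
		}
	}
	return false
}

func main() {
	ti := &templatev1.TemplateInstance{}
	ti.Status.Conditions = append(ti.Status.Conditions, templatev1.TemplateInstanceCondition{
		Type:   templatev1.TemplateInstanceReady,
		Status: corev1.ConditionTrue,
	})
	fmt.Println(isReady(ti)) // true
}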
+message TemplateList { + optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; + + // Items is a list of templates + repeated Template items = 2; +} + diff --git a/vendor/github.com/openshift/api/template/v1/legacy.go b/vendor/github.com/openshift/api/template/v1/legacy.go new file mode 100644 index 0000000000000000000000000000000000000000..9266f3ac9e69fa515512956ecfa44cfbd4b6453e --- /dev/null +++ b/vendor/github.com/openshift/api/template/v1/legacy.go @@ -0,0 +1,24 @@ +package v1 + +import ( + corev1 "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" +) + +var ( + legacyGroupVersion = schema.GroupVersion{Group: "", Version: "v1"} + legacySchemeBuilder = runtime.NewSchemeBuilder(addLegacyKnownTypes, corev1.AddToScheme) + DeprecatedInstallWithoutGroup = legacySchemeBuilder.AddToScheme +) + +func addLegacyKnownTypes(scheme *runtime.Scheme) error { + types := []runtime.Object{ + &Template{}, + &TemplateList{}, + } + scheme.AddKnownTypes(legacyGroupVersion, types...) + scheme.AddKnownTypeWithName(legacyGroupVersion.WithKind("TemplateConfig"), &Template{}) + scheme.AddKnownTypeWithName(legacyGroupVersion.WithKind("ProcessedTemplate"), &Template{}) + return nil +} diff --git a/vendor/github.com/openshift/api/template/v1/register.go b/vendor/github.com/openshift/api/template/v1/register.go new file mode 100644 index 0000000000000000000000000000000000000000..e34ff5610b60352b2252493cd79d357c57ca5763 --- /dev/null +++ b/vendor/github.com/openshift/api/template/v1/register.go @@ -0,0 +1,43 @@ +package v1 + +import ( + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" +) + +var ( + GroupName = "template.openshift.io" + GroupVersion = schema.GroupVersion{Group: GroupName, Version: "v1"} + schemeBuilder = runtime.NewSchemeBuilder(addKnownTypes, corev1.AddToScheme) + // Install is a function which adds this version to a scheme + Install = schemeBuilder.AddToScheme + + // SchemeGroupVersion generated code relies on this name + // Deprecated + SchemeGroupVersion = GroupVersion + // AddToScheme exists solely to keep the old generators creating valid code + // DEPRECATED + AddToScheme = schemeBuilder.AddToScheme +) + +// Resource generated code relies on this being here, but it logically belongs to the group +// DEPRECATED +func Resource(resource string) schema.GroupResource { + return schema.GroupResource{Group: GroupName, Resource: resource} +} + +// Adds the list of known types to api.Scheme. +func addKnownTypes(scheme *runtime.Scheme) error { + scheme.AddKnownTypes(GroupVersion, + &Template{}, + &TemplateList{}, + &TemplateInstance{}, + &TemplateInstanceList{}, + &BrokerTemplateInstance{}, + &BrokerTemplateInstanceList{}, + ) + metav1.AddToGroupVersion(scheme, GroupVersion) + return nil +} diff --git a/vendor/github.com/openshift/api/template/v1/types.go b/vendor/github.com/openshift/api/template/v1/types.go new file mode 100644 index 0000000000000000000000000000000000000000..566c0af5feacbdc61b7fa9532e5a279c13a122bd --- /dev/null +++ b/vendor/github.com/openshift/api/template/v1/types.go @@ -0,0 +1,258 @@ +package v1 + +import ( + "fmt" + + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" +) + +// +genclient +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// Template contains the inputs needed to produce a Config. 
+type Template struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` + + // message is an optional instructional message that will + // be displayed when this template is instantiated. + // This field should inform the user how to utilize the newly created resources. + // Parameter substitution will be performed on the message before being + // displayed so that generated credentials and other parameters can be + // included in the output. + Message string `json:"message,omitempty" protobuf:"bytes,2,opt,name=message"` + + // objects is an array of resources to include in this template. + // If a namespace value is hardcoded in the object, it will be removed + // during template instantiation, however if the namespace value + // is, or contains, a ${PARAMETER_REFERENCE}, the resolved + // value after parameter substitution will be respected and the object + // will be created in that namespace. + // +kubebuilder:pruning:PreserveUnknownFields + Objects []runtime.RawExtension `json:"objects" protobuf:"bytes,3,rep,name=objects"` + + // parameters is an optional array of Parameters used during the + // Template to Config transformation. + Parameters []Parameter `json:"parameters,omitempty" protobuf:"bytes,4,rep,name=parameters"` + + // labels is a optional set of labels that are applied to every + // object during the Template to Config transformation. + ObjectLabels map[string]string `json:"labels,omitempty" protobuf:"bytes,5,rep,name=labels"` +} + +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// TemplateList is a list of Template objects. +type TemplateList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` + + // Items is a list of templates + Items []Template `json:"items" protobuf:"bytes,2,rep,name=items"` +} + +// Parameter defines a name/value variable that is to be processed during +// the Template to Config transformation. +type Parameter struct { + // Name must be set and it can be referenced in Template + // Items using ${PARAMETER_NAME}. Required. + Name string `json:"name" protobuf:"bytes,1,opt,name=name"` + + // Optional: The name that will show in UI instead of parameter 'Name' + DisplayName string `json:"displayName,omitempty" protobuf:"bytes,2,opt,name=displayName"` + + // Description of a parameter. Optional. + Description string `json:"description,omitempty" protobuf:"bytes,3,opt,name=description"` + + // Value holds the Parameter data. If specified, the generator will be + // ignored. The value replaces all occurrences of the Parameter ${Name} + // expression during the Template to Config transformation. Optional. + Value string `json:"value,omitempty" protobuf:"bytes,4,opt,name=value"` + + // generate specifies the generator to be used to generate random string + // from an input value specified by From field. The result string is + // stored into Value field. If empty, no generator is being used, leaving + // the result Value untouched. Optional. + // + // The only supported generator is "expression", which accepts a "from" + // value in the form of a simple regular expression containing the + // range expression "[a-zA-Z0-9]", and the length expression "a{length}". 
+ // + // Examples: + // + // from | value + // ----------------------------- + // "test[0-9]{1}x" | "test7x" + // "[0-1]{8}" | "01001100" + // "0x[A-F0-9]{4}" | "0xB3AF" + // "[a-zA-Z0-9]{8}" | "hW4yQU5i" + // + Generate string `json:"generate,omitempty" protobuf:"bytes,5,opt,name=generate"` + + // From is an input value for the generator. Optional. + From string `json:"from,omitempty" protobuf:"bytes,6,opt,name=from"` + + // Optional: Indicates the parameter must have a value. Defaults to false. + Required bool `json:"required,omitempty" protobuf:"varint,7,opt,name=required"` +} + +// +genclient +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// TemplateInstance requests and records the instantiation of a Template. +// TemplateInstance is part of an experimental API. +type TemplateInstance struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` + + // spec describes the desired state of this TemplateInstance. + Spec TemplateInstanceSpec `json:"spec" protobuf:"bytes,2,opt,name=spec"` + + // status describes the current state of this TemplateInstance. + // +optional + Status TemplateInstanceStatus `json:"status" protobuf:"bytes,3,opt,name=status"` +} + +// TemplateInstanceSpec describes the desired state of a TemplateInstance. +type TemplateInstanceSpec struct { + // template is a full copy of the template for instantiation. + Template Template `json:"template" protobuf:"bytes,1,opt,name=template"` + + // secret is a reference to a Secret object containing the necessary + // template parameters. + Secret *corev1.LocalObjectReference `json:"secret,omitempty" protobuf:"bytes,2,opt,name=secret"` + + // requester holds the identity of the agent requesting the template + // instantiation. + // +optional + Requester *TemplateInstanceRequester `json:"requester" protobuf:"bytes,3,opt,name=requester"` +} + +// TemplateInstanceRequester holds the identity of an agent requesting a +// template instantiation. +type TemplateInstanceRequester struct { + // username uniquely identifies this user among all active users. + Username string `json:"username,omitempty" protobuf:"bytes,1,opt,name=username"` + + // uid is a unique value that identifies this user across time; if this user is + // deleted and another user by the same name is added, they will have + // different UIDs. + UID string `json:"uid,omitempty" protobuf:"bytes,2,opt,name=uid"` + + // groups represent the groups this user is a part of. + Groups []string `json:"groups,omitempty" protobuf:"bytes,3,rep,name=groups"` + + // extra holds additional information provided by the authenticator. + Extra map[string]ExtraValue `json:"extra,omitempty" protobuf:"bytes,4,rep,name=extra"` +} + +// ExtraValue masks the value so protobuf can generate +// +protobuf.nullable=true +// +protobuf.options.(gogoproto.goproto_stringer)=false +type ExtraValue []string + +func (t ExtraValue) String() string { + return fmt.Sprintf("%v", []string(t)) +} + +// TemplateInstanceStatus describes the current state of a TemplateInstance. +type TemplateInstanceStatus struct { + // conditions represent the latest available observations of a + // TemplateInstance's current state. + Conditions []TemplateInstanceCondition `json:"conditions,omitempty" protobuf:"bytes,1,rep,name=conditions"` + + // Objects references the objects created by the TemplateInstance. 
+ Objects []TemplateInstanceObject `json:"objects,omitempty" protobuf:"bytes,2,rep,name=objects"` +} + +// TemplateInstanceCondition contains condition information for a +// TemplateInstance. +type TemplateInstanceCondition struct { + // Type of the condition, currently Ready or InstantiateFailure. + Type TemplateInstanceConditionType `json:"type" protobuf:"bytes,1,opt,name=type,casttype=TemplateInstanceConditionType"` + // Status of the condition, one of True, False or Unknown. + Status corev1.ConditionStatus `json:"status" protobuf:"bytes,2,opt,name=status"` + // LastTransitionTime is the last time a condition status transitioned from + // one state to another. + LastTransitionTime metav1.Time `json:"lastTransitionTime" protobuf:"bytes,3,opt,name=lastTransitionTime"` + // Reason is a brief machine readable explanation for the condition's last + // transition. + Reason string `json:"reason" protobuf:"bytes,4,opt,name=reason"` + // Message is a human readable description of the details of the last + // transition, complementing reason. + Message string `json:"message" protobuf:"bytes,5,opt,name=message"` +} + +// TemplateInstanceConditionType is the type of condition pertaining to a +// TemplateInstance. +type TemplateInstanceConditionType string + +const ( + // TemplateInstanceReady indicates the readiness of the template + // instantiation. + TemplateInstanceReady TemplateInstanceConditionType = "Ready" + // TemplateInstanceInstantiateFailure indicates the failure of the template + // instantiation + TemplateInstanceInstantiateFailure TemplateInstanceConditionType = "InstantiateFailure" +) + +// TemplateInstanceObject references an object created by a TemplateInstance. +type TemplateInstanceObject struct { + // ref is a reference to the created object. When used under .spec, only + // name and namespace are used; these can contain references to parameters + // which will be substituted following the usual rules. + Ref corev1.ObjectReference `json:"ref,omitempty" protobuf:"bytes,1,opt,name=ref"` +} + +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// TemplateInstanceList is a list of TemplateInstance objects. +type TemplateInstanceList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` + + // items is a list of Templateinstances + Items []TemplateInstance `json:"items" protobuf:"bytes,2,rep,name=items"` +} + +// +genclient +// +genclient:nonNamespaced +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// BrokerTemplateInstance holds the service broker-related state associated with +// a TemplateInstance. BrokerTemplateInstance is part of an experimental API. +type BrokerTemplateInstance struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` + + // spec describes the state of this BrokerTemplateInstance. + Spec BrokerTemplateInstanceSpec `json:"spec" protobuf:"bytes,2,opt,name=spec"` +} + +// BrokerTemplateInstanceSpec describes the state of a BrokerTemplateInstance. +type BrokerTemplateInstanceSpec struct { + // templateinstance is a reference to a TemplateInstance object residing + // in a namespace. + TemplateInstance corev1.ObjectReference `json:"templateInstance" protobuf:"bytes,1,opt,name=templateInstance"` + + // secret is a reference to a Secret object residing in a namespace, + // containing the necessary template parameters. 
+ Secret corev1.ObjectReference `json:"secret" protobuf:"bytes,2,opt,name=secret"` + + // bindingids is a list of 'binding_id's provided during successive bind + // calls to the template service broker. + BindingIDs []string `json:"bindingIDs,omitempty" protobuf:"bytes,3,rep,name=bindingIDs"` +} + +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// BrokerTemplateInstanceList is a list of BrokerTemplateInstance objects. +type BrokerTemplateInstanceList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` + + // items is a list of BrokerTemplateInstances + Items []BrokerTemplateInstance `json:"items" protobuf:"bytes,2,rep,name=items"` +} diff --git a/vendor/github.com/openshift/api/template/v1/zz_generated.deepcopy.go b/vendor/github.com/openshift/api/template/v1/zz_generated.deepcopy.go new file mode 100644 index 0000000000000000000000000000000000000000..ef6c4ee3002c0e632d6a7749955f4fc110f9defd --- /dev/null +++ b/vendor/github.com/openshift/api/template/v1/zz_generated.deepcopy.go @@ -0,0 +1,393 @@ +// +build !ignore_autogenerated + +// Code generated by deepcopy-gen. DO NOT EDIT. + +package v1 + +import ( + corev1 "k8s.io/api/core/v1" + runtime "k8s.io/apimachinery/pkg/runtime" +) + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *BrokerTemplateInstance) DeepCopyInto(out *BrokerTemplateInstance) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BrokerTemplateInstance. +func (in *BrokerTemplateInstance) DeepCopy() *BrokerTemplateInstance { + if in == nil { + return nil + } + out := new(BrokerTemplateInstance) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *BrokerTemplateInstance) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *BrokerTemplateInstanceList) DeepCopyInto(out *BrokerTemplateInstanceList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]BrokerTemplateInstance, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BrokerTemplateInstanceList. +func (in *BrokerTemplateInstanceList) DeepCopy() *BrokerTemplateInstanceList { + if in == nil { + return nil + } + out := new(BrokerTemplateInstanceList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *BrokerTemplateInstanceList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *BrokerTemplateInstanceSpec) DeepCopyInto(out *BrokerTemplateInstanceSpec) { + *out = *in + out.TemplateInstance = in.TemplateInstance + out.Secret = in.Secret + if in.BindingIDs != nil { + in, out := &in.BindingIDs, &out.BindingIDs + *out = make([]string, len(*in)) + copy(*out, *in) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BrokerTemplateInstanceSpec. +func (in *BrokerTemplateInstanceSpec) DeepCopy() *BrokerTemplateInstanceSpec { + if in == nil { + return nil + } + out := new(BrokerTemplateInstanceSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in ExtraValue) DeepCopyInto(out *ExtraValue) { + { + in := &in + *out = make(ExtraValue, len(*in)) + copy(*out, *in) + return + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ExtraValue. +func (in ExtraValue) DeepCopy() ExtraValue { + if in == nil { + return nil + } + out := new(ExtraValue) + in.DeepCopyInto(out) + return *out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *Parameter) DeepCopyInto(out *Parameter) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Parameter. +func (in *Parameter) DeepCopy() *Parameter { + if in == nil { + return nil + } + out := new(Parameter) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *Template) DeepCopyInto(out *Template) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + if in.Objects != nil { + in, out := &in.Objects, &out.Objects + *out = make([]runtime.RawExtension, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Parameters != nil { + in, out := &in.Parameters, &out.Parameters + *out = make([]Parameter, len(*in)) + copy(*out, *in) + } + if in.ObjectLabels != nil { + in, out := &in.ObjectLabels, &out.ObjectLabels + *out = make(map[string]string, len(*in)) + for key, val := range *in { + (*out)[key] = val + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Template. +func (in *Template) DeepCopy() *Template { + if in == nil { + return nil + } + out := new(Template) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *Template) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *TemplateInstance) DeepCopyInto(out *TemplateInstance) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TemplateInstance. +func (in *TemplateInstance) DeepCopy() *TemplateInstance { + if in == nil { + return nil + } + out := new(TemplateInstance) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. 
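These deepcopy helpers exist so controllers can mutate a copy without touching objects shared through informer caches. A short sketch, assuming this package's Template type, verifying that DeepCopy re-makes the ObjectLabels map rather than aliasing it:

package main

import (
	"fmt"

	templatev1 "github.com/openshift/api/template/v1"
)

func main() {
	orig := &templatev1.Template{}
	orig.ObjectLabels = map[string]string{"app": "demo"}

	cp := orig.DeepCopy()
	cp.ObjectLabels["app"] = "changed"

	// The original's label map is untouched because DeepCopyInto allocates
	// a fresh map and copies the entries.
	fmt.Println(orig.ObjectLabels["app"], cp.ObjectLabels["app"]) // demo changed
}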
+func (in *TemplateInstance) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *TemplateInstanceCondition) DeepCopyInto(out *TemplateInstanceCondition) { + *out = *in + in.LastTransitionTime.DeepCopyInto(&out.LastTransitionTime) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TemplateInstanceCondition. +func (in *TemplateInstanceCondition) DeepCopy() *TemplateInstanceCondition { + if in == nil { + return nil + } + out := new(TemplateInstanceCondition) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *TemplateInstanceList) DeepCopyInto(out *TemplateInstanceList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]TemplateInstance, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TemplateInstanceList. +func (in *TemplateInstanceList) DeepCopy() *TemplateInstanceList { + if in == nil { + return nil + } + out := new(TemplateInstanceList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *TemplateInstanceList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *TemplateInstanceObject) DeepCopyInto(out *TemplateInstanceObject) { + *out = *in + out.Ref = in.Ref + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TemplateInstanceObject. +func (in *TemplateInstanceObject) DeepCopy() *TemplateInstanceObject { + if in == nil { + return nil + } + out := new(TemplateInstanceObject) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *TemplateInstanceRequester) DeepCopyInto(out *TemplateInstanceRequester) { + *out = *in + if in.Groups != nil { + in, out := &in.Groups, &out.Groups + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.Extra != nil { + in, out := &in.Extra, &out.Extra + *out = make(map[string]ExtraValue, len(*in)) + for key, val := range *in { + var outVal []string + if val == nil { + (*out)[key] = nil + } else { + in, out := &val, &outVal + *out = make(ExtraValue, len(*in)) + copy(*out, *in) + } + (*out)[key] = outVal + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TemplateInstanceRequester. +func (in *TemplateInstanceRequester) DeepCopy() *TemplateInstanceRequester { + if in == nil { + return nil + } + out := new(TemplateInstanceRequester) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *TemplateInstanceSpec) DeepCopyInto(out *TemplateInstanceSpec) { + *out = *in + in.Template.DeepCopyInto(&out.Template) + if in.Secret != nil { + in, out := &in.Secret, &out.Secret + *out = new(corev1.LocalObjectReference) + **out = **in + } + if in.Requester != nil { + in, out := &in.Requester, &out.Requester + *out = new(TemplateInstanceRequester) + (*in).DeepCopyInto(*out) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TemplateInstanceSpec. +func (in *TemplateInstanceSpec) DeepCopy() *TemplateInstanceSpec { + if in == nil { + return nil + } + out := new(TemplateInstanceSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *TemplateInstanceStatus) DeepCopyInto(out *TemplateInstanceStatus) { + *out = *in + if in.Conditions != nil { + in, out := &in.Conditions, &out.Conditions + *out = make([]TemplateInstanceCondition, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Objects != nil { + in, out := &in.Objects, &out.Objects + *out = make([]TemplateInstanceObject, len(*in)) + copy(*out, *in) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TemplateInstanceStatus. +func (in *TemplateInstanceStatus) DeepCopy() *TemplateInstanceStatus { + if in == nil { + return nil + } + out := new(TemplateInstanceStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *TemplateList) DeepCopyInto(out *TemplateList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]Template, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TemplateList. +func (in *TemplateList) DeepCopy() *TemplateList { + if in == nil { + return nil + } + out := new(TemplateList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *TemplateList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} diff --git a/vendor/github.com/openshift/api/template/v1/zz_generated.swagger_doc_generated.go b/vendor/github.com/openshift/api/template/v1/zz_generated.swagger_doc_generated.go new file mode 100644 index 0000000000000000000000000000000000000000..bbf8153101cb1667a8bf50fd2f783eb045e33afe --- /dev/null +++ b/vendor/github.com/openshift/api/template/v1/zz_generated.swagger_doc_generated.go @@ -0,0 +1,153 @@ +package v1 + +// This file contains a collection of methods that can be used from go-restful to +// generate Swagger API documentation for its models. Please read this PR for more +// information on the implementation: https://github.com/emicklei/go-restful/pull/215 +// +// TODOs are ignored from the parser (e.g. TODO(andronat):... || TODO:...) if and only if +// they are on one line! For multiple line or blocks that you want to ignore use ---. +// Any context after a --- is ignored. 
+// +// Those methods can be generated by using hack/update-swagger-docs.sh + +// AUTO-GENERATED FUNCTIONS START HERE +var map_BrokerTemplateInstance = map[string]string{ + "": "BrokerTemplateInstance holds the service broker-related state associated with a TemplateInstance. BrokerTemplateInstance is part of an experimental API.", + "spec": "spec describes the state of this BrokerTemplateInstance.", +} + +func (BrokerTemplateInstance) SwaggerDoc() map[string]string { + return map_BrokerTemplateInstance +} + +var map_BrokerTemplateInstanceList = map[string]string{ + "": "BrokerTemplateInstanceList is a list of BrokerTemplateInstance objects.", + "items": "items is a list of BrokerTemplateInstances", +} + +func (BrokerTemplateInstanceList) SwaggerDoc() map[string]string { + return map_BrokerTemplateInstanceList +} + +var map_BrokerTemplateInstanceSpec = map[string]string{ + "": "BrokerTemplateInstanceSpec describes the state of a BrokerTemplateInstance.", + "templateInstance": "templateinstance is a reference to a TemplateInstance object residing in a namespace.", + "secret": "secret is a reference to a Secret object residing in a namespace, containing the necessary template parameters.", + "bindingIDs": "bindingids is a list of 'binding_id's provided during successive bind calls to the template service broker.", +} + +func (BrokerTemplateInstanceSpec) SwaggerDoc() map[string]string { + return map_BrokerTemplateInstanceSpec +} + +var map_Parameter = map[string]string{ + "": "Parameter defines a name/value variable that is to be processed during the Template to Config transformation.", + "name": "Name must be set and it can be referenced in Template Items using ${PARAMETER_NAME}. Required.", + "displayName": "Optional: The name that will show in UI instead of parameter 'Name'", + "description": "Description of a parameter. Optional.", + "value": "Value holds the Parameter data. If specified, the generator will be ignored. The value replaces all occurrences of the Parameter ${Name} expression during the Template to Config transformation. Optional.", + "generate": "generate specifies the generator to be used to generate random string from an input value specified by From field. The result string is stored into Value field. If empty, no generator is being used, leaving the result Value untouched. Optional.\n\nThe only supported generator is \"expression\", which accepts a \"from\" value in the form of a simple regular expression containing the range expression \"[a-zA-Z0-9]\", and the length expression \"a{length}\".\n\nExamples:\n\nfrom | value", + "from": "From is an input value for the generator. Optional.", + "required": "Optional: Indicates the parameter must have a value. Defaults to false.", +} + +func (Parameter) SwaggerDoc() map[string]string { + return map_Parameter +} + +var map_Template = map[string]string{ + "": "Template contains the inputs needed to produce a Config.", + "message": "message is an optional instructional message that will be displayed when this template is instantiated. This field should inform the user how to utilize the newly created resources. Parameter substitution will be performed on the message before being displayed so that generated credentials and other parameters can be included in the output.", + "objects": "objects is an array of resources to include in this template. 
If a namespace value is hardcoded in the object, it will be removed during template instantiation, however if the namespace value is, or contains, a ${PARAMETER_REFERENCE}, the resolved value after parameter substitution will be respected and the object will be created in that namespace.", + "parameters": "parameters is an optional array of Parameters used during the Template to Config transformation.", + "labels": "labels is a optional set of labels that are applied to every object during the Template to Config transformation.", +} + +func (Template) SwaggerDoc() map[string]string { + return map_Template +} + +var map_TemplateInstance = map[string]string{ + "": "TemplateInstance requests and records the instantiation of a Template. TemplateInstance is part of an experimental API.", + "spec": "spec describes the desired state of this TemplateInstance.", + "status": "status describes the current state of this TemplateInstance.", +} + +func (TemplateInstance) SwaggerDoc() map[string]string { + return map_TemplateInstance +} + +var map_TemplateInstanceCondition = map[string]string{ + "": "TemplateInstanceCondition contains condition information for a TemplateInstance.", + "type": "Type of the condition, currently Ready or InstantiateFailure.", + "status": "Status of the condition, one of True, False or Unknown.", + "lastTransitionTime": "LastTransitionTime is the last time a condition status transitioned from one state to another.", + "reason": "Reason is a brief machine readable explanation for the condition's last transition.", + "message": "Message is a human readable description of the details of the last transition, complementing reason.", +} + +func (TemplateInstanceCondition) SwaggerDoc() map[string]string { + return map_TemplateInstanceCondition +} + +var map_TemplateInstanceList = map[string]string{ + "": "TemplateInstanceList is a list of TemplateInstance objects.", + "items": "items is a list of Templateinstances", +} + +func (TemplateInstanceList) SwaggerDoc() map[string]string { + return map_TemplateInstanceList +} + +var map_TemplateInstanceObject = map[string]string{ + "": "TemplateInstanceObject references an object created by a TemplateInstance.", + "ref": "ref is a reference to the created object. 
When used under .spec, only name and namespace are used; these can contain references to parameters which will be substituted following the usual rules.", +} + +func (TemplateInstanceObject) SwaggerDoc() map[string]string { + return map_TemplateInstanceObject +} + +var map_TemplateInstanceRequester = map[string]string{ + "": "TemplateInstanceRequester holds the identity of an agent requesting a template instantiation.", + "username": "username uniquely identifies this user among all active users.", + "uid": "uid is a unique value that identifies this user across time; if this user is deleted and another user by the same name is added, they will have different UIDs.", + "groups": "groups represent the groups this user is a part of.", + "extra": "extra holds additional information provided by the authenticator.", +} + +func (TemplateInstanceRequester) SwaggerDoc() map[string]string { + return map_TemplateInstanceRequester +} + +var map_TemplateInstanceSpec = map[string]string{ + "": "TemplateInstanceSpec describes the desired state of a TemplateInstance.", + "template": "template is a full copy of the template for instantiation.", + "secret": "secret is a reference to a Secret object containing the necessary template parameters.", + "requester": "requester holds the identity of the agent requesting the template instantiation.", +} + +func (TemplateInstanceSpec) SwaggerDoc() map[string]string { + return map_TemplateInstanceSpec +} + +var map_TemplateInstanceStatus = map[string]string{ + "": "TemplateInstanceStatus describes the current state of a TemplateInstance.", + "conditions": "conditions represent the latest available observations of a TemplateInstance's current state.", + "objects": "Objects references the objects created by the TemplateInstance.", +} + +func (TemplateInstanceStatus) SwaggerDoc() map[string]string { + return map_TemplateInstanceStatus +} + +var map_TemplateList = map[string]string{ + "": "TemplateList is a list of Template objects.", + "items": "Items is a list of templates", +} + +func (TemplateList) SwaggerDoc() map[string]string { + return map_TemplateList +} + +// AUTO-GENERATED FUNCTIONS END HERE diff --git a/vendor/github.com/openshift/api/user/install.go b/vendor/github.com/openshift/api/user/install.go new file mode 100644 index 0000000000000000000000000000000000000000..28d498062146c16f2b6ec6fd9276fceb0183c744 --- /dev/null +++ b/vendor/github.com/openshift/api/user/install.go @@ -0,0 +1,26 @@ +package user + +import ( + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" + + userv1 "github.com/openshift/api/user/v1" +) + +const ( + GroupName = "user.openshift.io" +) + +var ( + schemeBuilder = runtime.NewSchemeBuilder(userv1.Install) + // Install is a function which adds every version of this group to a scheme + Install = schemeBuilder.AddToScheme +) + +func Resource(resource string) schema.GroupResource { + return schema.GroupResource{Group: GroupName, Resource: resource} +} + +func Kind(kind string) schema.GroupKind { + return schema.GroupKind{Group: GroupName, Kind: kind} +} diff --git a/vendor/github.com/openshift/api/user/v1/doc.go b/vendor/github.com/openshift/api/user/v1/doc.go new file mode 100644 index 0000000000000000000000000000000000000000..42287095e242a23d5fbb2b2420019260c62b60bd --- /dev/null +++ b/vendor/github.com/openshift/api/user/v1/doc.go @@ -0,0 +1,8 @@ +// +k8s:deepcopy-gen=package,register +// +k8s:conversion-gen=github.com/openshift/origin/pkg/user/apis/user +// +k8s:defaulter-gen=TypeMeta +// 
+k8s:openapi-gen=true + +// +groupName=user.openshift.io +// Package v1 is the v1 version of the API. +package v1 diff --git a/vendor/github.com/openshift/api/user/v1/generated.pb.go b/vendor/github.com/openshift/api/user/v1/generated.pb.go new file mode 100644 index 0000000000000000000000000000000000000000..7e456203a8c282a758cc3785e407e8e4110e0465 --- /dev/null +++ b/vendor/github.com/openshift/api/user/v1/generated.pb.go @@ -0,0 +1,2324 @@ +// Code generated by protoc-gen-gogo. DO NOT EDIT. +// source: github.com/openshift/api/user/v1/generated.proto + +package v1 + +import ( + fmt "fmt" + + io "io" + + proto "github.com/gogo/protobuf/proto" + github_com_gogo_protobuf_sortkeys "github.com/gogo/protobuf/sortkeys" + + math "math" + math_bits "math/bits" + reflect "reflect" + strings "strings" +) + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.GoGoProtoPackageIsVersion2 // please upgrade the proto package + +func (m *Group) Reset() { *m = Group{} } +func (*Group) ProtoMessage() {} +func (*Group) Descriptor() ([]byte, []int) { + return fileDescriptor_ea159b02d89a1362, []int{0} +} +func (m *Group) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *Group) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *Group) XXX_Merge(src proto.Message) { + xxx_messageInfo_Group.Merge(m, src) +} +func (m *Group) XXX_Size() int { + return m.Size() +} +func (m *Group) XXX_DiscardUnknown() { + xxx_messageInfo_Group.DiscardUnknown(m) +} + +var xxx_messageInfo_Group proto.InternalMessageInfo + +func (m *GroupList) Reset() { *m = GroupList{} } +func (*GroupList) ProtoMessage() {} +func (*GroupList) Descriptor() ([]byte, []int) { + return fileDescriptor_ea159b02d89a1362, []int{1} +} +func (m *GroupList) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *GroupList) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *GroupList) XXX_Merge(src proto.Message) { + xxx_messageInfo_GroupList.Merge(m, src) +} +func (m *GroupList) XXX_Size() int { + return m.Size() +} +func (m *GroupList) XXX_DiscardUnknown() { + xxx_messageInfo_GroupList.DiscardUnknown(m) +} + +var xxx_messageInfo_GroupList proto.InternalMessageInfo + +func (m *Identity) Reset() { *m = Identity{} } +func (*Identity) ProtoMessage() {} +func (*Identity) Descriptor() ([]byte, []int) { + return fileDescriptor_ea159b02d89a1362, []int{2} +} +func (m *Identity) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *Identity) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *Identity) XXX_Merge(src proto.Message) { + xxx_messageInfo_Identity.Merge(m, src) +} +func (m *Identity) XXX_Size() int { + return m.Size() +} +func (m *Identity) XXX_DiscardUnknown() { + xxx_messageInfo_Identity.DiscardUnknown(m) +} + +var xxx_messageInfo_Identity proto.InternalMessageInfo 
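
The vendored generated.pb.go follows the standard gogo/protobuf layout: each type gets Reset/ProtoMessage/Descriptor plus XXX_* hooks that delegate to the hand-rolled MarshalToSizedBuffer and Unmarshal methods defined further down in the file. As a minimal sketch of how a consumer of the vendored package round-trips one of these types (illustrative only, not part of this diff; the userv1 import alias is assumed):

package main

import (
	"fmt"

	userv1 "github.com/openshift/api/user/v1"
)

func main() {
	g := &userv1.Group{Users: userv1.OptionalNames{"alice", "bob"}}

	// Marshal delegates to the generated MarshalToSizedBuffer, which writes
	// fields back-to-front into a buffer sized up front by Size().
	data, err := g.Marshal()
	if err != nil {
		panic(err)
	}

	// Unmarshal walks the wire format tag by tag, mirroring the generated
	// switch statements over field numbers.
	decoded := &userv1.Group{}
	if err := decoded.Unmarshal(data); err != nil {
		panic(err)
	}
	fmt.Println(decoded.Users) // [alice bob]
}

Writing back-to-front lets the generated code emit length-prefixed fields without a second sizing pass, which is why the marshalers below index from the end of the buffer.
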
+ +func (m *IdentityList) Reset() { *m = IdentityList{} } +func (*IdentityList) ProtoMessage() {} +func (*IdentityList) Descriptor() ([]byte, []int) { + return fileDescriptor_ea159b02d89a1362, []int{3} +} +func (m *IdentityList) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *IdentityList) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *IdentityList) XXX_Merge(src proto.Message) { + xxx_messageInfo_IdentityList.Merge(m, src) +} +func (m *IdentityList) XXX_Size() int { + return m.Size() +} +func (m *IdentityList) XXX_DiscardUnknown() { + xxx_messageInfo_IdentityList.DiscardUnknown(m) +} + +var xxx_messageInfo_IdentityList proto.InternalMessageInfo + +func (m *OptionalNames) Reset() { *m = OptionalNames{} } +func (*OptionalNames) ProtoMessage() {} +func (*OptionalNames) Descriptor() ([]byte, []int) { + return fileDescriptor_ea159b02d89a1362, []int{4} +} +func (m *OptionalNames) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *OptionalNames) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *OptionalNames) XXX_Merge(src proto.Message) { + xxx_messageInfo_OptionalNames.Merge(m, src) +} +func (m *OptionalNames) XXX_Size() int { + return m.Size() +} +func (m *OptionalNames) XXX_DiscardUnknown() { + xxx_messageInfo_OptionalNames.DiscardUnknown(m) +} + +var xxx_messageInfo_OptionalNames proto.InternalMessageInfo + +func (m *User) Reset() { *m = User{} } +func (*User) ProtoMessage() {} +func (*User) Descriptor() ([]byte, []int) { + return fileDescriptor_ea159b02d89a1362, []int{5} +} +func (m *User) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *User) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *User) XXX_Merge(src proto.Message) { + xxx_messageInfo_User.Merge(m, src) +} +func (m *User) XXX_Size() int { + return m.Size() +} +func (m *User) XXX_DiscardUnknown() { + xxx_messageInfo_User.DiscardUnknown(m) +} + +var xxx_messageInfo_User proto.InternalMessageInfo + +func (m *UserIdentityMapping) Reset() { *m = UserIdentityMapping{} } +func (*UserIdentityMapping) ProtoMessage() {} +func (*UserIdentityMapping) Descriptor() ([]byte, []int) { + return fileDescriptor_ea159b02d89a1362, []int{6} +} +func (m *UserIdentityMapping) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *UserIdentityMapping) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *UserIdentityMapping) XXX_Merge(src proto.Message) { + xxx_messageInfo_UserIdentityMapping.Merge(m, src) +} +func (m *UserIdentityMapping) XXX_Size() int { + return m.Size() +} +func (m *UserIdentityMapping) XXX_DiscardUnknown() { + xxx_messageInfo_UserIdentityMapping.DiscardUnknown(m) +} + +var xxx_messageInfo_UserIdentityMapping proto.InternalMessageInfo + +func (m *UserList) Reset() { *m = UserList{} } +func (*UserList) ProtoMessage() {} +func (*UserList) Descriptor() ([]byte, []int) { + return fileDescriptor_ea159b02d89a1362, []int{7} +} +func (m *UserList) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *UserList) 
XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *UserList) XXX_Merge(src proto.Message) { + xxx_messageInfo_UserList.Merge(m, src) +} +func (m *UserList) XXX_Size() int { + return m.Size() +} +func (m *UserList) XXX_DiscardUnknown() { + xxx_messageInfo_UserList.DiscardUnknown(m) +} + +var xxx_messageInfo_UserList proto.InternalMessageInfo + +func init() { + proto.RegisterType((*Group)(nil), "github.com.openshift.api.user.v1.Group") + proto.RegisterType((*GroupList)(nil), "github.com.openshift.api.user.v1.GroupList") + proto.RegisterType((*Identity)(nil), "github.com.openshift.api.user.v1.Identity") + proto.RegisterMapType((map[string]string)(nil), "github.com.openshift.api.user.v1.Identity.ExtraEntry") + proto.RegisterType((*IdentityList)(nil), "github.com.openshift.api.user.v1.IdentityList") + proto.RegisterType((*OptionalNames)(nil), "github.com.openshift.api.user.v1.OptionalNames") + proto.RegisterType((*User)(nil), "github.com.openshift.api.user.v1.User") + proto.RegisterType((*UserIdentityMapping)(nil), "github.com.openshift.api.user.v1.UserIdentityMapping") + proto.RegisterType((*UserList)(nil), "github.com.openshift.api.user.v1.UserList") +} + +func init() { + proto.RegisterFile("github.com/openshift/api/user/v1/generated.proto", fileDescriptor_ea159b02d89a1362) +} + +var fileDescriptor_ea159b02d89a1362 = []byte{ + // 724 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xbc, 0x54, 0x3d, 0x6f, 0x13, 0x4b, + 0x14, 0xf5, 0xd8, 0xde, 0xc8, 0x9e, 0x38, 0x4f, 0xd6, 0xbe, 0x14, 0x96, 0x8b, 0xb5, 0xb5, 0x4f, + 0x7a, 0xcf, 0x7a, 0x82, 0xd9, 0x38, 0x02, 0x64, 0xa5, 0xb4, 0x08, 0x28, 0x22, 0x21, 0x61, 0x24, + 0x9a, 0x88, 0x82, 0x89, 0x3d, 0x5e, 0x0f, 0xf6, 0x7e, 0x68, 0x77, 0xd6, 0xc2, 0x5d, 0x7e, 0x02, + 0x74, 0x94, 0xfc, 0x09, 0x44, 0x81, 0xe8, 0x43, 0x97, 0x32, 0x05, 0xb2, 0xc8, 0xd2, 0xf1, 0x2b, + 0xd0, 0xcc, 0x7e, 0x78, 0x9d, 0x0f, 0x39, 0x12, 0x92, 0xbb, 0x9d, 0x3b, 0xf7, 0x9c, 0x39, 0xf7, + 0xdc, 0x7b, 0x17, 0x6e, 0x99, 0x8c, 0x0f, 0x83, 0x13, 0xd4, 0x73, 0x2c, 0xc3, 0x71, 0xa9, 0xed, + 0x0f, 0xd9, 0x80, 0x1b, 0xc4, 0x65, 0x46, 0xe0, 0x53, 0xcf, 0x98, 0xb4, 0x0d, 0x93, 0xda, 0xd4, + 0x23, 0x9c, 0xf6, 0x91, 0xeb, 0x39, 0xdc, 0x51, 0x9b, 0x73, 0x04, 0x4a, 0x11, 0x88, 0xb8, 0x0c, + 0x09, 0x04, 0x9a, 0xb4, 0xeb, 0xf7, 0x33, 0x9c, 0xa6, 0x63, 0x3a, 0x86, 0x04, 0x9e, 0x04, 0x03, + 0x79, 0x92, 0x07, 0xf9, 0x15, 0x11, 0xd6, 0xf5, 0x51, 0xc7, 0x47, 0xcc, 0x91, 0x8f, 0xf6, 0x1c, + 0x8f, 0xde, 0xf0, 0x68, 0xfd, 0xc1, 0x3c, 0xc7, 0x22, 0xbd, 0x21, 0xb3, 0xa9, 0x37, 0x35, 0xdc, + 0x91, 0x29, 0x02, 0xbe, 0x61, 0x51, 0x4e, 0x6e, 0x42, 0x3d, 0xba, 0x0d, 0xe5, 0x05, 0x36, 0x67, + 0x16, 0x35, 0xfc, 0xde, 0x90, 0x5a, 0xe4, 0x2a, 0x4e, 0xff, 0x02, 0xa0, 0xf2, 0xd4, 0x73, 0x02, + 0x57, 0x7d, 0x0d, 0x4b, 0x82, 0xbc, 0x4f, 0x38, 0xa9, 0x81, 0x26, 0x68, 0xad, 0x6f, 0x6f, 0xa1, + 0x88, 0x14, 0x65, 0x49, 0x91, 0x3b, 0x32, 0x45, 0xc0, 0x47, 0x22, 0x1b, 0x4d, 0xda, 0xe8, 0xf0, + 0xe4, 0x0d, 0xed, 0xf1, 0x03, 0xca, 0x49, 0x57, 0x3d, 0x9b, 0x35, 0x72, 0xe1, 0xac, 0x01, 0xe7, + 0x31, 0x9c, 0xb2, 0xaa, 0x47, 0x50, 0x11, 0xbe, 0xf9, 0xb5, 0xbc, 0xa4, 0x37, 0xd0, 0x32, 0x7b, + 0xd1, 0xa1, 0xcb, 0x99, 0x63, 0x93, 0xf1, 0x73, 0x62, 0x51, 0xbf, 0x5b, 0x0e, 0x67, 0x0d, 0xe5, + 0xa5, 0x60, 0xc0, 0x11, 0x91, 0xfe, 0x19, 0xc0, 0xb2, 0x54, 0xbf, 0xcf, 0x7c, 0xae, 0xbe, 0xba, + 0x56, 0x01, 0xba, 0x5b, 0x05, 0x02, 0x2d, 0xf5, 0x57, 0x63, 
0xfd, 0xa5, 0x24, 0x92, 0x51, 0xbf, + 0x0f, 0x15, 0xc6, 0xa9, 0x25, 0xd4, 0x17, 0x5a, 0xeb, 0xdb, 0xff, 0x2d, 0x57, 0x2f, 0x95, 0x75, + 0x37, 0x62, 0x4e, 0x65, 0x4f, 0xa0, 0x71, 0x44, 0xa2, 0x7f, 0x2b, 0xc0, 0xd2, 0x5e, 0x9f, 0xda, + 0x9c, 0xf1, 0xe9, 0x0a, 0xac, 0xef, 0xc0, 0x8a, 0xeb, 0x39, 0x13, 0xd6, 0xa7, 0x9e, 0xf0, 0x52, + 0x76, 0xa0, 0xdc, 0xdd, 0x8c, 0x31, 0x95, 0xa3, 0xcc, 0x1d, 0x5e, 0xc8, 0x54, 0x1f, 0xc3, 0x6a, + 0x72, 0x16, 0xd6, 0x4b, 0x74, 0x41, 0xa2, 0x6b, 0x31, 0xba, 0x7a, 0x74, 0xe5, 0x1e, 0x5f, 0x43, + 0xa8, 0xbb, 0xb0, 0x28, 0x5c, 0xa9, 0x15, 0x65, 0x75, 0xff, 0x64, 0xaa, 0x43, 0x62, 0x0f, 0xe6, + 0xb5, 0x60, 0x3a, 0xa0, 0x1e, 0xb5, 0x7b, 0xb4, 0x5b, 0x89, 0xe9, 0x8b, 0x82, 0x04, 0x4b, 0xb8, + 0x7a, 0x0c, 0x15, 0xfa, 0x96, 0x7b, 0xa4, 0xa6, 0xc8, 0x1e, 0x3c, 0x5c, 0xde, 0x83, 0xc4, 0x63, + 0xb4, 0x2b, 0x70, 0xbb, 0x36, 0xf7, 0xa6, 0xf3, 0x8e, 0xc8, 0x18, 0x8e, 0x28, 0xeb, 0x1d, 0x08, + 0xe7, 0x39, 0x6a, 0x15, 0x16, 0x46, 0x74, 0x2a, 0xbb, 0x51, 0xc6, 0xe2, 0x53, 0xdd, 0x84, 0xca, + 0x84, 0x8c, 0x83, 0xd8, 0x3b, 0x1c, 0x1d, 0x76, 0xf2, 0x1d, 0xa0, 0x7f, 0x05, 0xb0, 0x92, 0xbc, + 0xb3, 0x82, 0x41, 0x3c, 0x5c, 0x1c, 0xc4, 0xff, 0xef, 0x6e, 0xc2, 0x2d, 0xb3, 0xb8, 0x03, 0x37, + 0x16, 0x16, 0x4d, 0x6d, 0x24, 0x2f, 0x80, 0x66, 0xa1, 0x55, 0x8e, 0xf6, 0x2e, 0x8b, 0xd8, 0x29, + 0x7d, 0xf8, 0xd8, 0xc8, 0x9d, 0x7e, 0x6f, 0xe6, 0xf4, 0x5f, 0x00, 0xca, 0x06, 0xad, 0x60, 0x86, + 0xef, 0xc1, 0xd2, 0x20, 0x18, 0x8f, 0x33, 0xf3, 0x9b, 0xba, 0xf4, 0x24, 0x8e, 0xe3, 0x34, 0x43, + 0x45, 0x10, 0xb2, 0xa8, 0x6c, 0x46, 0xfd, 0x5a, 0x41, 0x16, 0xf2, 0x97, 0xe0, 0xde, 0x4b, 0xa3, + 0x38, 0x93, 0xa1, 0xea, 0x70, 0xcd, 0x14, 0xfb, 0xea, 0xd7, 0x8a, 0x32, 0x17, 0x86, 0xb3, 0xc6, + 0x9a, 0xdc, 0x60, 0x1f, 0xc7, 0x37, 0xfa, 0xfb, 0x3c, 0xfc, 0x5b, 0x14, 0x9b, 0xf8, 0x79, 0x40, + 0x5c, 0x97, 0xd9, 0xe6, 0x0a, 0x6a, 0x7f, 0x01, 0x4b, 0xb1, 0xd6, 0x69, 0xfc, 0xf7, 0xbc, 0xd3, + 0x0e, 0xa5, 0x06, 0x25, 0x8a, 0x71, 0x4a, 0x93, 0xae, 0x64, 0xe1, 0x8f, 0x56, 0x52, 0xff, 0x04, + 0x60, 0x49, 0x1c, 0x57, 0x30, 0xf8, 0xcf, 0x16, 0x07, 0xff, 0xdf, 0xe5, 0x83, 0x2f, 0x84, 0xdd, + 0x3c, 0xf4, 0xdd, 0xd6, 0xd9, 0xa5, 0x96, 0x3b, 0xbf, 0xd4, 0x72, 0x17, 0x97, 0x5a, 0xee, 0x34, + 0xd4, 0xc0, 0x59, 0xa8, 0x81, 0xf3, 0x50, 0x03, 0x17, 0xa1, 0x06, 0x7e, 0x84, 0x1a, 0x78, 0xf7, + 0x53, 0xcb, 0x1d, 0xe7, 0x27, 0xed, 0xdf, 0x01, 0x00, 0x00, 0xff, 0xff, 0x87, 0x3b, 0x61, 0x21, + 0x38, 0x08, 0x00, 0x00, +} + +func (m *Group) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Group) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *Group) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.Users != nil { + { + size, err := m.Users.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + { + size, err := m.ObjectMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *GroupList) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *GroupList) 
MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *GroupList) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.Items) > 0 { + for iNdEx := len(m.Items) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Items[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + } + { + size, err := m.ListMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *Identity) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Identity) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *Identity) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.Extra) > 0 { + keysForExtra := make([]string, 0, len(m.Extra)) + for k := range m.Extra { + keysForExtra = append(keysForExtra, string(k)) + } + github_com_gogo_protobuf_sortkeys.Strings(keysForExtra) + for iNdEx := len(keysForExtra) - 1; iNdEx >= 0; iNdEx-- { + v := m.Extra[string(keysForExtra[iNdEx])] + baseI := i + i -= len(v) + copy(dAtA[i:], v) + i = encodeVarintGenerated(dAtA, i, uint64(len(v))) + i-- + dAtA[i] = 0x12 + i -= len(keysForExtra[iNdEx]) + copy(dAtA[i:], keysForExtra[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(keysForExtra[iNdEx]))) + i-- + dAtA[i] = 0xa + i = encodeVarintGenerated(dAtA, i, uint64(baseI-i)) + i-- + dAtA[i] = 0x2a + } + } + { + size, err := m.User.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x22 + i -= len(m.ProviderUserName) + copy(dAtA[i:], m.ProviderUserName) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.ProviderUserName))) + i-- + dAtA[i] = 0x1a + i -= len(m.ProviderName) + copy(dAtA[i:], m.ProviderName) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.ProviderName))) + i-- + dAtA[i] = 0x12 + { + size, err := m.ObjectMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *IdentityList) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *IdentityList) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *IdentityList) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.Items) > 0 { + for iNdEx := len(m.Items) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Items[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + } + { + size, err := m.ListMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, 
nil +} + +func (m OptionalNames) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m OptionalNames) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m OptionalNames) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m) > 0 { + for iNdEx := len(m) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m[iNdEx]) + copy(dAtA[i:], m[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(m[iNdEx]))) + i-- + dAtA[i] = 0xa + } + } + return len(dAtA) - i, nil +} + +func (m *User) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *User) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *User) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.Groups) > 0 { + for iNdEx := len(m.Groups) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.Groups[iNdEx]) + copy(dAtA[i:], m.Groups[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Groups[iNdEx]))) + i-- + dAtA[i] = 0x22 + } + } + if len(m.Identities) > 0 { + for iNdEx := len(m.Identities) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.Identities[iNdEx]) + copy(dAtA[i:], m.Identities[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Identities[iNdEx]))) + i-- + dAtA[i] = 0x1a + } + } + i -= len(m.FullName) + copy(dAtA[i:], m.FullName) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.FullName))) + i-- + dAtA[i] = 0x12 + { + size, err := m.ObjectMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *UserIdentityMapping) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *UserIdentityMapping) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *UserIdentityMapping) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + { + size, err := m.User.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a + { + size, err := m.Identity.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + { + size, err := m.ObjectMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *UserList) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *UserList) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *UserList) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) 
+ _ = i + var l int + _ = l + if len(m.Items) > 0 { + for iNdEx := len(m.Items) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Items[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + } + { + size, err := m.ListMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func encodeVarintGenerated(dAtA []byte, offset int, v uint64) int { + offset -= sovGenerated(v) + base := offset + for v >= 1<<7 { + dAtA[offset] = uint8(v&0x7f | 0x80) + v >>= 7 + offset++ + } + dAtA[offset] = uint8(v) + return base +} +func (m *Group) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.ObjectMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + if m.Users != nil { + l = m.Users.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + return n +} + +func (m *GroupList) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.ListMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + if len(m.Items) > 0 { + for _, e := range m.Items { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m *Identity) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.ObjectMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.ProviderName) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.ProviderUserName) + n += 1 + l + sovGenerated(uint64(l)) + l = m.User.Size() + n += 1 + l + sovGenerated(uint64(l)) + if len(m.Extra) > 0 { + for k, v := range m.Extra { + _ = k + _ = v + mapEntrySize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + len(v) + sovGenerated(uint64(len(v))) + n += mapEntrySize + 1 + sovGenerated(uint64(mapEntrySize)) + } + } + return n +} + +func (m *IdentityList) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.ListMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + if len(m.Items) > 0 { + for _, e := range m.Items { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m OptionalNames) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m) > 0 { + for _, s := range m { + l = len(s) + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m *User) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.ObjectMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.FullName) + n += 1 + l + sovGenerated(uint64(l)) + if len(m.Identities) > 0 { + for _, s := range m.Identities { + l = len(s) + n += 1 + l + sovGenerated(uint64(l)) + } + } + if len(m.Groups) > 0 { + for _, s := range m.Groups { + l = len(s) + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m *UserIdentityMapping) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.ObjectMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = m.Identity.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = m.User.Size() + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *UserList) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.ListMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + if len(m.Items) > 0 { + for _, e := range m.Items { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func sovGenerated(x uint64) (n int) { + return (math_bits.Len64(x|1) + 6) / 7 +} +func 
sozGenerated(x uint64) (n int) { + return sovGenerated(uint64((x << 1) ^ uint64((int64(x) >> 63)))) +} +func (this *Group) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&Group{`, + `ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v1.ObjectMeta", 1), `&`, ``, 1) + `,`, + `Users:` + strings.Replace(fmt.Sprintf("%v", this.Users), "OptionalNames", "OptionalNames", 1) + `,`, + `}`, + }, "") + return s +} +func (this *GroupList) String() string { + if this == nil { + return "nil" + } + repeatedStringForItems := "[]Group{" + for _, f := range this.Items { + repeatedStringForItems += strings.Replace(strings.Replace(f.String(), "Group", "Group", 1), `&`, ``, 1) + "," + } + repeatedStringForItems += "}" + s := strings.Join([]string{`&GroupList{`, + `ListMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ListMeta), "ListMeta", "v1.ListMeta", 1), `&`, ``, 1) + `,`, + `Items:` + repeatedStringForItems + `,`, + `}`, + }, "") + return s +} +func (this *Identity) String() string { + if this == nil { + return "nil" + } + keysForExtra := make([]string, 0, len(this.Extra)) + for k := range this.Extra { + keysForExtra = append(keysForExtra, k) + } + github_com_gogo_protobuf_sortkeys.Strings(keysForExtra) + mapStringForExtra := "map[string]string{" + for _, k := range keysForExtra { + mapStringForExtra += fmt.Sprintf("%v: %v,", k, this.Extra[k]) + } + mapStringForExtra += "}" + s := strings.Join([]string{`&Identity{`, + `ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v1.ObjectMeta", 1), `&`, ``, 1) + `,`, + `ProviderName:` + fmt.Sprintf("%v", this.ProviderName) + `,`, + `ProviderUserName:` + fmt.Sprintf("%v", this.ProviderUserName) + `,`, + `User:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.User), "ObjectReference", "v11.ObjectReference", 1), `&`, ``, 1) + `,`, + `Extra:` + mapStringForExtra + `,`, + `}`, + }, "") + return s +} +func (this *IdentityList) String() string { + if this == nil { + return "nil" + } + repeatedStringForItems := "[]Identity{" + for _, f := range this.Items { + repeatedStringForItems += strings.Replace(strings.Replace(f.String(), "Identity", "Identity", 1), `&`, ``, 1) + "," + } + repeatedStringForItems += "}" + s := strings.Join([]string{`&IdentityList{`, + `ListMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ListMeta), "ListMeta", "v1.ListMeta", 1), `&`, ``, 1) + `,`, + `Items:` + repeatedStringForItems + `,`, + `}`, + }, "") + return s +} +func (this *User) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&User{`, + `ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v1.ObjectMeta", 1), `&`, ``, 1) + `,`, + `FullName:` + fmt.Sprintf("%v", this.FullName) + `,`, + `Identities:` + fmt.Sprintf("%v", this.Identities) + `,`, + `Groups:` + fmt.Sprintf("%v", this.Groups) + `,`, + `}`, + }, "") + return s +} +func (this *UserIdentityMapping) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&UserIdentityMapping{`, + `ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v1.ObjectMeta", 1), `&`, ``, 1) + `,`, + `Identity:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Identity), "ObjectReference", "v11.ObjectReference", 1), `&`, ``, 1) + `,`, + `User:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.User), "ObjectReference", 
"v11.ObjectReference", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *UserList) String() string { + if this == nil { + return "nil" + } + repeatedStringForItems := "[]User{" + for _, f := range this.Items { + repeatedStringForItems += strings.Replace(strings.Replace(f.String(), "User", "User", 1), `&`, ``, 1) + "," + } + repeatedStringForItems += "}" + s := strings.Join([]string{`&UserList{`, + `ListMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ListMeta), "ListMeta", "v1.ListMeta", 1), `&`, ``, 1) + `,`, + `Items:` + repeatedStringForItems + `,`, + `}`, + }, "") + return s +} +func valueToStringGenerated(v interface{}) string { + rv := reflect.ValueOf(v) + if rv.IsNil() { + return "nil" + } + pv := reflect.Indirect(rv).Interface() + return fmt.Sprintf("*%v", pv) +} +func (m *Group) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Group: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Group: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ObjectMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Users", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Users == nil { + m.Users = OptionalNames{} + } + if err := m.Users.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *GroupList) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } 
+ b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: GroupList: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: GroupList: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ListMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ListMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Items = append(m.Items, Group{}) + if err := m.Items[len(m.Items)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *Identity) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Identity: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Identity: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ObjectMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if 
wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ProviderName", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ProviderName = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ProviderUserName", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ProviderUserName = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field User", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.User.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Extra", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Extra == nil { + m.Extra = make(map[string]string) + } + var mapkey string + var mapvalue string + for iNdEx < postIndex { + entryPreIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + if fieldNum == 1 { + var stringLenmapkey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapkey |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapkey := int(stringLenmapkey) + if intStringLenmapkey < 0 { + return ErrInvalidLengthGenerated + } + postStringIndexmapkey := iNdEx + intStringLenmapkey + 
if postStringIndexmapkey < 0 { + return ErrInvalidLengthGenerated + } + if postStringIndexmapkey > l { + return io.ErrUnexpectedEOF + } + mapkey = string(dAtA[iNdEx:postStringIndexmapkey]) + iNdEx = postStringIndexmapkey + } else if fieldNum == 2 { + var stringLenmapvalue uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapvalue |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapvalue := int(stringLenmapvalue) + if intStringLenmapvalue < 0 { + return ErrInvalidLengthGenerated + } + postStringIndexmapvalue := iNdEx + intStringLenmapvalue + if postStringIndexmapvalue < 0 { + return ErrInvalidLengthGenerated + } + if postStringIndexmapvalue > l { + return io.ErrUnexpectedEOF + } + mapvalue = string(dAtA[iNdEx:postStringIndexmapvalue]) + iNdEx = postStringIndexmapvalue + } else { + iNdEx = entryPreIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > postIndex { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + m.Extra[mapkey] = mapvalue + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *IdentityList) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: IdentityList: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: IdentityList: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ListMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ListMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return 
io.ErrUnexpectedEOF + } + m.Items = append(m.Items, Identity{}) + if err := m.Items[len(m.Items)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *OptionalNames) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: OptionalNames: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: OptionalNames: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + *m = append(*m, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *User) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: User: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: User: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return 
io.ErrUnexpectedEOF + } + if err := m.ObjectMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field FullName", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.FullName = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Identities", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Identities = append(m.Identities, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Groups", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Groups = append(m.Groups, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *UserIdentityMapping) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: UserIdentityMapping: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: UserIdentityMapping: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 
ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ObjectMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Identity", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Identity.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field User", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.User.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *UserList) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: UserList: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: UserList: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ListMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ListMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + 
iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Items = append(m.Items, User{}) + if err := m.Items[len(m.Items)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func skipGenerated(dAtA []byte) (n int, err error) { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowGenerated + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + wireType := int(wire & 0x7) + switch wireType { + case 0: + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowGenerated + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + iNdEx++ + if dAtA[iNdEx-1] < 0x80 { + break + } + } + return iNdEx, nil + case 1: + iNdEx += 8 + return iNdEx, nil + case 2: + var length int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowGenerated + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + length |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if length < 0 { + return 0, ErrInvalidLengthGenerated + } + iNdEx += length + if iNdEx < 0 { + return 0, ErrInvalidLengthGenerated + } + return iNdEx, nil + case 3: + for { + var innerWire uint64 + var start int = iNdEx + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowGenerated + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + innerWire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + innerWireType := int(innerWire & 0x7) + if innerWireType == 4 { + break + } + next, err := skipGenerated(dAtA[start:]) + if err != nil { + return 0, err + } + iNdEx = start + next + if iNdEx < 0 { + return 0, ErrInvalidLengthGenerated + } + } + return iNdEx, nil + case 4: + return iNdEx, nil + case 5: + iNdEx += 4 + return iNdEx, nil + default: + return 0, fmt.Errorf("proto: illegal wireType %d", wireType) + } + } + panic("unreachable") +} + +var ( + ErrInvalidLengthGenerated = fmt.Errorf("proto: negative length found during unmarshaling") + ErrIntOverflowGenerated = fmt.Errorf("proto: integer overflow") +) diff --git a/vendor/github.com/openshift/api/user/v1/generated.proto b/vendor/github.com/openshift/api/user/v1/generated.proto new file mode 100644 index 0000000000000000000000000000000000000000..7444d41ef07b11aad67bc6235eab2a67396d517b --- /dev/null +++ b/vendor/github.com/openshift/api/user/v1/generated.proto @@ -0,0 +1,108 @@ + +// This file was 
autogenerated by go-to-protobuf. Do not edit it manually! + +syntax = 'proto2'; + +package github.com.openshift.api.user.v1; + +import "k8s.io/api/core/v1/generated.proto"; +import "k8s.io/apimachinery/pkg/apis/meta/v1/generated.proto"; +import "k8s.io/apimachinery/pkg/runtime/schema/generated.proto"; + +// Package-wide variables from generator "generated". +option go_package = "v1"; + +// Group represents a referenceable set of Users +message Group { + optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; + + // Users is the list of users in this group. + optional OptionalNames users = 2; +} + +// GroupList is a collection of Groups +message GroupList { + optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; + + // Items is the list of groups + repeated Group items = 2; +} + +// Identity records a successful authentication of a user with an identity provider. The +// information about the source of authentication is stored on the identity, and the identity +// is then associated with a single user object. Multiple identities can reference a single +// user. Information retrieved from the authentication provider is stored in the extra field +// using a schema determined by the provider. +message Identity { + optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; + + // ProviderName is the source of identity information + optional string providerName = 2; + + // ProviderUserName uniquely represents this identity in the scope of the provider + optional string providerUserName = 3; + + // User is a reference to the user this identity is associated with + // Both Name and UID must be set + optional k8s.io.api.core.v1.ObjectReference user = 4; + + // Extra holds extra information about this identity + map<string, string> extra = 5; +} + +// IdentityList is a collection of Identities +message IdentityList { + optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; + + // Items is the list of identities + repeated Identity items = 2; +} + +// OptionalNames is an array that may also be left nil to distinguish between set and unset. +// +protobuf.nullable=true +// +protobuf.options.(gogoproto.goproto_stringer)=false +message OptionalNames { + // items, if empty, will result in an empty slice + + repeated string items = 1; +} + +// Upon log in, every user of the system receives a User and Identity resource. Administrators +// may directly manipulate the attributes of the users for their own tracking, or set groups +// via the API. The user name is unique and is chosen based on the value provided by the +// identity provider - if a user already exists with the incoming name, the user name may have +// a number appended to it depending on the configuration of the system. +message User { + optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; + + // FullName is the full name of user + optional string fullName = 2; + + // Identities are the identities associated with this user + repeated string identities = 3; + + // Groups specifies group names this user is a member of. + // This field is deprecated and will be removed in a future release. + // Instead, create a Group object containing the name of this User. 
+ repeated string groups = 4; +} + +// UserIdentityMapping maps a user to an identity +message UserIdentityMapping { + optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; + + // Identity is a reference to an identity + optional k8s.io.api.core.v1.ObjectReference identity = 2; + + // User is a reference to a user + optional k8s.io.api.core.v1.ObjectReference user = 3; +} + +// UserList is a collection of Users +message UserList { + optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; + + // Items is the list of users + repeated User items = 2; +} + diff --git a/vendor/github.com/openshift/api/user/v1/legacy.go b/vendor/github.com/openshift/api/user/v1/legacy.go new file mode 100644 index 0000000000000000000000000000000000000000..6817a9f1f3ea9a7c0c671f793e660f9b5da9bbd0 --- /dev/null +++ b/vendor/github.com/openshift/api/user/v1/legacy.go @@ -0,0 +1,27 @@ +package v1 + +import ( + corev1 "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" +) + +var ( + legacyGroupVersion = schema.GroupVersion{Group: "", Version: "v1"} + legacySchemeBuilder = runtime.NewSchemeBuilder(addLegacyKnownTypes, corev1.AddToScheme) + DeprecatedInstallWithoutGroup = legacySchemeBuilder.AddToScheme +) + +func addLegacyKnownTypes(scheme *runtime.Scheme) error { + types := []runtime.Object{ + &User{}, + &UserList{}, + &Identity{}, + &IdentityList{}, + &UserIdentityMapping{}, + &Group{}, + &GroupList{}, + } + scheme.AddKnownTypes(legacyGroupVersion, types...) + return nil +} diff --git a/vendor/github.com/openshift/api/user/v1/register.go b/vendor/github.com/openshift/api/user/v1/register.go new file mode 100644 index 0000000000000000000000000000000000000000..11341d72a98acf30c5e47481a498a49fa94cdd12 --- /dev/null +++ b/vendor/github.com/openshift/api/user/v1/register.go @@ -0,0 +1,44 @@ +package v1 + +import ( + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" +) + +var ( + GroupName = "user.openshift.io" + GroupVersion = schema.GroupVersion{Group: GroupName, Version: "v1"} + schemeBuilder = runtime.NewSchemeBuilder(addKnownTypes, corev1.AddToScheme) + // Install is a function which adds this version to a scheme + Install = schemeBuilder.AddToScheme + + // SchemeGroupVersion generated code relies on this name + // Deprecated + SchemeGroupVersion = GroupVersion + // AddToScheme exists solely to keep the old generators creating valid code + // DEPRECATED + AddToScheme = schemeBuilder.AddToScheme +) + +// Resource generated code relies on this being here, but it logically belongs to the group +// DEPRECATED +func Resource(resource string) schema.GroupResource { + return schema.GroupResource{Group: GroupName, Resource: resource} +} + +// Adds the list of known types to api.Scheme. 
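The legacy.go/register.go pair above gives consumers two install paths: Install registers the types under user.openshift.io/v1, while DeprecatedInstallWithoutGroup registers them under the ungrouped legacy "/v1" GroupVersion. A minimal sketch of wiring the grouped types into a scheme (hypothetical consumer code, not part of this change):

```go
package main

import (
	"fmt"

	userv1 "github.com/openshift/api/user/v1"
	"k8s.io/apimachinery/pkg/runtime"
)

func main() {
	scheme := runtime.NewScheme()
	// Install runs addKnownTypes plus corev1.AddToScheme via the scheme builder.
	if err := userv1.Install(scheme); err != nil {
		panic(err)
	}
	gvks, _, err := scheme.ObjectKinds(&userv1.User{})
	if err != nil {
		panic(err)
	}
	fmt.Println(gvks) // [user.openshift.io/v1, Kind=User]
}
```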
+func addKnownTypes(scheme *runtime.Scheme) error { + scheme.AddKnownTypes(GroupVersion, + &User{}, + &UserList{}, + &Identity{}, + &IdentityList{}, + &UserIdentityMapping{}, + &Group{}, + &GroupList{}, + ) + metav1.AddToGroupVersion(scheme, GroupVersion) + return nil +} diff --git a/vendor/github.com/openshift/api/user/v1/types.go b/vendor/github.com/openshift/api/user/v1/types.go new file mode 100644 index 0000000000000000000000000000000000000000..8708d027353f16d8b96007a0400459cec765bbb6 --- /dev/null +++ b/vendor/github.com/openshift/api/user/v1/types.go @@ -0,0 +1,131 @@ +package v1 + +import ( + "fmt" + + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +// +genclient +// +genclient:nonNamespaced +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// Upon log in, every user of the system receives a User and Identity resource. Administrators +// may directly manipulate the attributes of the users for their own tracking, or set groups +// via the API. The user name is unique and is chosen based on the value provided by the +// identity provider - if a user already exists with the incoming name, the user name may have +// a number appended to it depending on the configuration of the system. +type User struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` + + // FullName is the full name of user + FullName string `json:"fullName,omitempty" protobuf:"bytes,2,opt,name=fullName"` + + // Identities are the identities associated with this user + Identities []string `json:"identities" protobuf:"bytes,3,rep,name=identities"` + + // Groups specifies group names this user is a member of. + // This field is deprecated and will be removed in a future release. + // Instead, create a Group object containing the name of this User. + Groups []string `json:"groups" protobuf:"bytes,4,rep,name=groups"` +} + +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// UserList is a collection of Users +type UserList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` + + // Items is the list of users + Items []User `json:"items" protobuf:"bytes,2,rep,name=items"` +} + +// +genclient +// +genclient:nonNamespaced +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// Identity records a successful authentication of a user with an identity provider. The +// information about the source of authentication is stored on the identity, and the identity +// is then associated with a single user object. Multiple identities can reference a single +// user. Information retrieved from the authentication provider is stored in the extra field +// using a schema determined by the provider. 
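For a concrete feel for the types.go definitions above — the User struct with its paired json/protobuf tags — here is an illustrative round-trip through encoding/json; the names used are invented for the example:

```go
package main

import (
	"encoding/json"
	"fmt"

	userv1 "github.com/openshift/api/user/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

func main() {
	u := userv1.User{
		TypeMeta:   metav1.TypeMeta{APIVersion: "user.openshift.io/v1", Kind: "User"},
		ObjectMeta: metav1.ObjectMeta{Name: "alice"}, // example name
		FullName:   "Alice Example",
		Identities: []string{"ldap:alice"}, // conventionally "<providerName>:<providerUserName>"
	}
	out, err := json.MarshalIndent(&u, "", "  ")
	if err != nil {
		panic(err)
	}
	fmt.Println(string(out))
	// Note: "identities" and "groups" carry no omitempty, so a nil Groups
	// still serializes (as null) rather than disappearing from the output.
}
```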
+type Identity struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` + + // ProviderName is the source of identity information + ProviderName string `json:"providerName" protobuf:"bytes,2,opt,name=providerName"` + + // ProviderUserName uniquely represents this identity in the scope of the provider + ProviderUserName string `json:"providerUserName" protobuf:"bytes,3,opt,name=providerUserName"` + + // User is a reference to the user this identity is associated with + // Both Name and UID must be set + User corev1.ObjectReference `json:"user" protobuf:"bytes,4,opt,name=user"` + + // Extra holds extra information about this identity + Extra map[string]string `json:"extra,omitempty" protobuf:"bytes,5,rep,name=extra"` +} + +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// IdentityList is a collection of Identities +type IdentityList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` + + // Items is the list of identities + Items []Identity `json:"items" protobuf:"bytes,2,rep,name=items"` +} + +// +genclient +// +genclient:nonNamespaced +// +genclient:onlyVerbs=get,create,update,delete +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// UserIdentityMapping maps a user to an identity +type UserIdentityMapping struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` + + // Identity is a reference to an identity + Identity corev1.ObjectReference `json:"identity,omitempty" protobuf:"bytes,2,opt,name=identity"` + // User is a reference to a user + User corev1.ObjectReference `json:"user,omitempty" protobuf:"bytes,3,opt,name=user"` +} + +// OptionalNames is an array that may also be left nil to distinguish between set and unset. +// +protobuf.nullable=true +// +protobuf.options.(gogoproto.goproto_stringer)=false +type OptionalNames []string + +func (t OptionalNames) String() string { + return fmt.Sprintf("%v", []string(t)) +} + +// +genclient +// +genclient:nonNamespaced +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// Group represents a referenceable set of Users +type Group struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` + + // Users is the list of users in this group. + Users OptionalNames `json:"users" protobuf:"bytes,2,rep,name=users"` +} + +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// GroupList is a collection of Groups +type GroupList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` + + // Items is the list of groups + Items []Group `json:"items" protobuf:"bytes,2,rep,name=items"` +} diff --git a/vendor/github.com/openshift/api/user/v1/zz_generated.deepcopy.go b/vendor/github.com/openshift/api/user/v1/zz_generated.deepcopy.go new file mode 100644 index 0000000000000000000000000000000000000000..6e45450adb36e7bc8e7264499faa94183fe52a4c --- /dev/null +++ b/vendor/github.com/openshift/api/user/v1/zz_generated.deepcopy.go @@ -0,0 +1,257 @@ +// +build !ignore_autogenerated + +// Code generated by deepcopy-gen. DO NOT EDIT. + +package v1 + +import ( + runtime "k8s.io/apimachinery/pkg/runtime" +) + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
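OptionalNames, defined above with +protobuf.nullable=true, exists purely so that a nil slice ("users" never set) stays distinguishable from an empty one ("users" explicitly empty). The distinction is easiest to see through JSON; a small illustrative sketch:

```go
package main

import (
	"encoding/json"
	"fmt"

	userv1 "github.com/openshift/api/user/v1"
)

func main() {
	var unset userv1.OptionalNames       // nil: field was never set
	empty := userv1.OptionalNames{}      // set, but deliberately empty
	one := userv1.OptionalNames{"alice"} // example member

	for _, v := range []userv1.OptionalNames{unset, empty, one} {
		b, err := json.Marshal(v)
		if err != nil {
			panic(err)
		}
		fmt.Printf("json=%s String()=%s\n", b, v)
	}
	// json=null String()=[]
	// json=[] String()=[]
	// json=["alice"] String()=[alice]
}
```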
+func (in *Group) DeepCopyInto(out *Group) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + if in.Users != nil { + in, out := &in.Users, &out.Users + *out = make(OptionalNames, len(*in)) + copy(*out, *in) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Group. +func (in *Group) DeepCopy() *Group { + if in == nil { + return nil + } + out := new(Group) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *Group) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *GroupList) DeepCopyInto(out *GroupList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]Group, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new GroupList. +func (in *GroupList) DeepCopy() *GroupList { + if in == nil { + return nil + } + out := new(GroupList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *GroupList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *Identity) DeepCopyInto(out *Identity) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + out.User = in.User + if in.Extra != nil { + in, out := &in.Extra, &out.Extra + *out = make(map[string]string, len(*in)) + for key, val := range *in { + (*out)[key] = val + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Identity. +func (in *Identity) DeepCopy() *Identity { + if in == nil { + return nil + } + out := new(Identity) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *Identity) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *IdentityList) DeepCopyInto(out *IdentityList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]Identity, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IdentityList. +func (in *IdentityList) DeepCopy() *IdentityList { + if in == nil { + return nil + } + out := new(IdentityList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. 
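The deepcopy functions above look mechanical, but they carry real semantics: every reference-typed field (slice, map) is reallocated, so a copy can be mutated without touching the original. A quick illustrative check:

```go
package main

import (
	"fmt"

	userv1 "github.com/openshift/api/user/v1"
)

func main() {
	orig := &userv1.Group{Users: userv1.OptionalNames{"alice"}}
	cp := orig.DeepCopy()
	cp.Users[0] = "bob" // mutate the copy only

	fmt.Println(orig.Users) // [alice] — the original's slice is untouched
	fmt.Println(cp.Users)   // [bob]
}
```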
+func (in *IdentityList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in OptionalNames) DeepCopyInto(out *OptionalNames) { + { + in := &in + *out = make(OptionalNames, len(*in)) + copy(*out, *in) + return + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OptionalNames. +func (in OptionalNames) DeepCopy() OptionalNames { + if in == nil { + return nil + } + out := new(OptionalNames) + in.DeepCopyInto(out) + return *out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *User) DeepCopyInto(out *User) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + if in.Identities != nil { + in, out := &in.Identities, &out.Identities + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.Groups != nil { + in, out := &in.Groups, &out.Groups + *out = make([]string, len(*in)) + copy(*out, *in) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new User. +func (in *User) DeepCopy() *User { + if in == nil { + return nil + } + out := new(User) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *User) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *UserIdentityMapping) DeepCopyInto(out *UserIdentityMapping) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + out.Identity = in.Identity + out.User = in.User + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new UserIdentityMapping. +func (in *UserIdentityMapping) DeepCopy() *UserIdentityMapping { + if in == nil { + return nil + } + out := new(UserIdentityMapping) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *UserIdentityMapping) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *UserList) DeepCopyInto(out *UserList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]User, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new UserList. +func (in *UserList) DeepCopy() *UserList { + if in == nil { + return nil + } + out := new(UserList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. 
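DeepCopyObject is what lets these concrete types satisfy runtime.Object, so generic machinery can clone them without knowing their type. A short sketch of that use:

```go
package main

import (
	"fmt"

	userv1 "github.com/openshift/api/user/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/runtime"
)

// cloneAll deep-copies a mixed batch of API objects generically.
func cloneAll(objs []runtime.Object) []runtime.Object {
	out := make([]runtime.Object, 0, len(objs))
	for _, o := range objs {
		out = append(out, o.DeepCopyObject())
	}
	return out
}

func main() {
	clones := cloneAll([]runtime.Object{
		&userv1.User{ObjectMeta: metav1.ObjectMeta{Name: "alice"}},
		&userv1.Group{ObjectMeta: metav1.ObjectMeta{Name: "admins"}},
	})
	fmt.Println(clones[0].(*userv1.User).Name, clones[1].(*userv1.Group).Name)
}
```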
+func (in *UserList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} diff --git a/vendor/github.com/openshift/api/user/v1/zz_generated.swagger_doc_generated.go b/vendor/github.com/openshift/api/user/v1/zz_generated.swagger_doc_generated.go new file mode 100644 index 0000000000000000000000000000000000000000..e034251f965c7e6988a74e3a2d38ad4b7c83d15f --- /dev/null +++ b/vendor/github.com/openshift/api/user/v1/zz_generated.swagger_doc_generated.go @@ -0,0 +1,83 @@ +package v1 + +// This file contains a collection of methods that can be used from go-restful to +// generate Swagger API documentation for its models. Please read this PR for more +// information on the implementation: https://github.com/emicklei/go-restful/pull/215 +// +// TODOs are ignored from the parser (e.g. TODO(andronat):... || TODO:...) if and only if +// they are on one line! For multiple line or blocks that you want to ignore use ---. +// Any context after a --- is ignored. +// +// Those methods can be generated by using hack/update-swagger-docs.sh + +// AUTO-GENERATED FUNCTIONS START HERE +var map_Group = map[string]string{ + "": "Group represents a referenceable set of Users", + "users": "Users is the list of users in this group.", +} + +func (Group) SwaggerDoc() map[string]string { + return map_Group +} + +var map_GroupList = map[string]string{ + "": "GroupList is a collection of Groups", + "items": "Items is the list of groups", +} + +func (GroupList) SwaggerDoc() map[string]string { + return map_GroupList +} + +var map_Identity = map[string]string{ + "": "Identity records a successful authentication of a user with an identity provider. The information about the source of authentication is stored on the identity, and the identity is then associated with a single user object. Multiple identities can reference a single user. Information retrieved from the authentication provider is stored in the extra field using a schema determined by the provider.", + "providerName": "ProviderName is the source of identity information", + "providerUserName": "ProviderUserName uniquely represents this identity in the scope of the provider", + "user": "User is a reference to the user this identity is associated with Both Name and UID must be set", + "extra": "Extra holds extra information about this identity", +} + +func (Identity) SwaggerDoc() map[string]string { + return map_Identity +} + +var map_IdentityList = map[string]string{ + "": "IdentityList is a collection of Identities", + "items": "Items is the list of identities", +} + +func (IdentityList) SwaggerDoc() map[string]string { + return map_IdentityList +} + +var map_User = map[string]string{ + "": "Upon log in, every user of the system receives a User and Identity resource. Administrators may directly manipulate the attributes of the users for their own tracking, or set groups via the API. The user name is unique and is chosen based on the value provided by the identity provider - if a user already exists with the incoming name, the user name may have a number appended to it depending on the configuration of the system.", + "fullName": "FullName is the full name of user", + "identities": "Identities are the identities associated with this user", + "groups": "Groups specifies group names this user is a member of. This field is deprecated and will be removed in a future release. 
Instead, create a Group object containing the name of this User.", +} + +func (User) SwaggerDoc() map[string]string { + return map_User +} + +var map_UserIdentityMapping = map[string]string{ + "": "UserIdentityMapping maps a user to an identity", + "identity": "Identity is a reference to an identity", + "user": "User is a reference to a user", +} + +func (UserIdentityMapping) SwaggerDoc() map[string]string { + return map_UserIdentityMapping +} + +var map_UserList = map[string]string{ + "": "UserList is a collection of Users", + "items": "Items is the list of users", +} + +func (UserList) SwaggerDoc() map[string]string { + return map_UserList +} + +// AUTO-GENERATED FUNCTIONS END HERE diff --git a/vendor/github.com/openshift/client-go/security/clientset/versioned/scheme/doc.go b/vendor/github.com/openshift/client-go/security/clientset/versioned/scheme/doc.go deleted file mode 100644 index 14db57a58f8d2535aae9393d9668432f4db2346c..0000000000000000000000000000000000000000 --- a/vendor/github.com/openshift/client-go/security/clientset/versioned/scheme/doc.go +++ /dev/null @@ -1,4 +0,0 @@ -// Code generated by client-gen. DO NOT EDIT. - -// This package contains the scheme of the automatically generated clientset. -package scheme diff --git a/vendor/github.com/openshift/client-go/security/clientset/versioned/scheme/register.go b/vendor/github.com/openshift/client-go/security/clientset/versioned/scheme/register.go deleted file mode 100644 index 871d4424f3e04679db6a71cd0ea8c99d2409e46d..0000000000000000000000000000000000000000 --- a/vendor/github.com/openshift/client-go/security/clientset/versioned/scheme/register.go +++ /dev/null @@ -1,40 +0,0 @@ -// Code generated by client-gen. DO NOT EDIT. - -package scheme - -import ( - securityv1 "github.com/openshift/api/security/v1" - v1 "k8s.io/apimachinery/pkg/apis/meta/v1" - runtime "k8s.io/apimachinery/pkg/runtime" - schema "k8s.io/apimachinery/pkg/runtime/schema" - serializer "k8s.io/apimachinery/pkg/runtime/serializer" - utilruntime "k8s.io/apimachinery/pkg/util/runtime" -) - -var Scheme = runtime.NewScheme() -var Codecs = serializer.NewCodecFactory(Scheme) -var ParameterCodec = runtime.NewParameterCodec(Scheme) -var localSchemeBuilder = runtime.SchemeBuilder{ - securityv1.AddToScheme, -} - -// AddToScheme adds all types of this clientset into the given scheme. This allows composition -// of clientsets, like in: -// -// import ( -// "k8s.io/client-go/kubernetes" -// clientsetscheme "k8s.io/client-go/kubernetes/scheme" -// aggregatorclientsetscheme "k8s.io/kube-aggregator/pkg/client/clientset_generated/clientset/scheme" -// ) -// -// kclientset, _ := kubernetes.NewForConfig(c) -// _ = aggregatorclientsetscheme.AddToScheme(clientsetscheme.Scheme) -// -// After this, RawExtensions in Kubernetes types will serialize kube-aggregator types -// correctly. -var AddToScheme = localSchemeBuilder.AddToScheme - -func init() { - v1.AddToGroupVersion(Scheme, schema.GroupVersion{Version: "v1"}) - utilruntime.Must(AddToScheme(Scheme)) -} diff --git a/vendor/github.com/openshift/client-go/security/clientset/versioned/typed/security/v1/doc.go b/vendor/github.com/openshift/client-go/security/clientset/versioned/typed/security/v1/doc.go deleted file mode 100644 index 225e6b2be34f2a207fca9fd94c010b04782dbc47..0000000000000000000000000000000000000000 --- a/vendor/github.com/openshift/client-go/security/clientset/versioned/typed/security/v1/doc.go +++ /dev/null @@ -1,4 +0,0 @@ -// Code generated by client-gen. DO NOT EDIT. 
- -// This package has the automatically generated typed clients. -package v1 diff --git a/vendor/github.com/openshift/client-go/security/clientset/versioned/typed/security/v1/generated_expansion.go b/vendor/github.com/openshift/client-go/security/clientset/versioned/typed/security/v1/generated_expansion.go deleted file mode 100644 index db29417c88c73056a375eb386a314611c99788d5..0000000000000000000000000000000000000000 --- a/vendor/github.com/openshift/client-go/security/clientset/versioned/typed/security/v1/generated_expansion.go +++ /dev/null @@ -1,13 +0,0 @@ -// Code generated by client-gen. DO NOT EDIT. - -package v1 - -type PodSecurityPolicyReviewExpansion interface{} - -type PodSecurityPolicySelfSubjectReviewExpansion interface{} - -type PodSecurityPolicySubjectReviewExpansion interface{} - -type RangeAllocationExpansion interface{} - -type SecurityContextConstraintsExpansion interface{} diff --git a/vendor/github.com/openshift/client-go/security/clientset/versioned/typed/security/v1/podsecuritypolicyreview.go b/vendor/github.com/openshift/client-go/security/clientset/versioned/typed/security/v1/podsecuritypolicyreview.go deleted file mode 100644 index cde9e9fb0dca986584d5af7ce629c04fd2052728..0000000000000000000000000000000000000000 --- a/vendor/github.com/openshift/client-go/security/clientset/versioned/typed/security/v1/podsecuritypolicyreview.go +++ /dev/null @@ -1,46 +0,0 @@ -// Code generated by client-gen. DO NOT EDIT. - -package v1 - -import ( - v1 "github.com/openshift/api/security/v1" - rest "k8s.io/client-go/rest" -) - -// PodSecurityPolicyReviewsGetter has a method to return a PodSecurityPolicyReviewInterface. -// A group's client should implement this interface. -type PodSecurityPolicyReviewsGetter interface { - PodSecurityPolicyReviews(namespace string) PodSecurityPolicyReviewInterface -} - -// PodSecurityPolicyReviewInterface has methods to work with PodSecurityPolicyReview resources. -type PodSecurityPolicyReviewInterface interface { - Create(*v1.PodSecurityPolicyReview) (*v1.PodSecurityPolicyReview, error) - PodSecurityPolicyReviewExpansion -} - -// podSecurityPolicyReviews implements PodSecurityPolicyReviewInterface -type podSecurityPolicyReviews struct { - client rest.Interface - ns string -} - -// newPodSecurityPolicyReviews returns a PodSecurityPolicyReviews -func newPodSecurityPolicyReviews(c *SecurityV1Client, namespace string) *podSecurityPolicyReviews { - return &podSecurityPolicyReviews{ - client: c.RESTClient(), - ns: namespace, - } -} - -// Create takes the representation of a podSecurityPolicyReview and creates it. Returns the server's representation of the podSecurityPolicyReview, and an error, if there is any. -func (c *podSecurityPolicyReviews) Create(podSecurityPolicyReview *v1.PodSecurityPolicyReview) (result *v1.PodSecurityPolicyReview, err error) { - result = &v1.PodSecurityPolicyReview{} - err = c.client.Post(). - Namespace(c.ns). - Resource("podsecuritypolicyreviews"). - Body(podSecurityPolicyReview). - Do(). 
- Into(result) - return -} diff --git a/vendor/github.com/openshift/client-go/security/clientset/versioned/typed/security/v1/podsecuritypolicyselfsubjectreview.go b/vendor/github.com/openshift/client-go/security/clientset/versioned/typed/security/v1/podsecuritypolicyselfsubjectreview.go deleted file mode 100644 index 98f9b1dc4939cfaa2e7396dfd5f75f96e44fae29..0000000000000000000000000000000000000000 --- a/vendor/github.com/openshift/client-go/security/clientset/versioned/typed/security/v1/podsecuritypolicyselfsubjectreview.go +++ /dev/null @@ -1,46 +0,0 @@ -// Code generated by client-gen. DO NOT EDIT. - -package v1 - -import ( - v1 "github.com/openshift/api/security/v1" - rest "k8s.io/client-go/rest" -) - -// PodSecurityPolicySelfSubjectReviewsGetter has a method to return a PodSecurityPolicySelfSubjectReviewInterface. -// A group's client should implement this interface. -type PodSecurityPolicySelfSubjectReviewsGetter interface { - PodSecurityPolicySelfSubjectReviews(namespace string) PodSecurityPolicySelfSubjectReviewInterface -} - -// PodSecurityPolicySelfSubjectReviewInterface has methods to work with PodSecurityPolicySelfSubjectReview resources. -type PodSecurityPolicySelfSubjectReviewInterface interface { - Create(*v1.PodSecurityPolicySelfSubjectReview) (*v1.PodSecurityPolicySelfSubjectReview, error) - PodSecurityPolicySelfSubjectReviewExpansion -} - -// podSecurityPolicySelfSubjectReviews implements PodSecurityPolicySelfSubjectReviewInterface -type podSecurityPolicySelfSubjectReviews struct { - client rest.Interface - ns string -} - -// newPodSecurityPolicySelfSubjectReviews returns a PodSecurityPolicySelfSubjectReviews -func newPodSecurityPolicySelfSubjectReviews(c *SecurityV1Client, namespace string) *podSecurityPolicySelfSubjectReviews { - return &podSecurityPolicySelfSubjectReviews{ - client: c.RESTClient(), - ns: namespace, - } -} - -// Create takes the representation of a podSecurityPolicySelfSubjectReview and creates it. Returns the server's representation of the podSecurityPolicySelfSubjectReview, and an error, if there is any. -func (c *podSecurityPolicySelfSubjectReviews) Create(podSecurityPolicySelfSubjectReview *v1.PodSecurityPolicySelfSubjectReview) (result *v1.PodSecurityPolicySelfSubjectReview, err error) { - result = &v1.PodSecurityPolicySelfSubjectReview{} - err = c.client.Post(). - Namespace(c.ns). - Resource("podsecuritypolicyselfsubjectreviews"). - Body(podSecurityPolicySelfSubjectReview). - Do(). - Into(result) - return -} diff --git a/vendor/github.com/openshift/client-go/security/clientset/versioned/typed/security/v1/podsecuritypolicysubjectreview.go b/vendor/github.com/openshift/client-go/security/clientset/versioned/typed/security/v1/podsecuritypolicysubjectreview.go deleted file mode 100644 index 2f0e34cf0f9ce7ea0c1d5639856d1e9c7201e748..0000000000000000000000000000000000000000 --- a/vendor/github.com/openshift/client-go/security/clientset/versioned/typed/security/v1/podsecuritypolicysubjectreview.go +++ /dev/null @@ -1,46 +0,0 @@ -// Code generated by client-gen. DO NOT EDIT. - -package v1 - -import ( - v1 "github.com/openshift/api/security/v1" - rest "k8s.io/client-go/rest" -) - -// PodSecurityPolicySubjectReviewsGetter has a method to return a PodSecurityPolicySubjectReviewInterface. -// A group's client should implement this interface. 
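Context for the deletions in this block: the security clientset files being removed are standard client-gen output, and callers went through the usual typed-client flow. Code along the following lines (kubeconfig path and namespace invented, review spec elided) stops compiling against this vendor tree once these files are gone:

```go
package main

import (
	securityv1 "github.com/openshift/api/security/v1"
	securityclient "github.com/openshift/client-go/security/clientset/versioned/typed/security/v1"
	"k8s.io/client-go/tools/clientcmd"
)

func main() {
	cfg, err := clientcmd.BuildConfigFromFlags("", "/path/to/kubeconfig") // placeholder path
	if err != nil {
		panic(err)
	}
	client, err := securityclient.NewForConfig(cfg)
	if err != nil {
		panic(err)
	}
	review := &securityv1.PodSecurityPolicySelfSubjectReview{} // spec elided for brevity
	if _, err := client.PodSecurityPolicySelfSubjectReviews("my-namespace").Create(review); err != nil {
		panic(err)
	}
}
```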
-type PodSecurityPolicySubjectReviewsGetter interface { - PodSecurityPolicySubjectReviews(namespace string) PodSecurityPolicySubjectReviewInterface -} - -// PodSecurityPolicySubjectReviewInterface has methods to work with PodSecurityPolicySubjectReview resources. -type PodSecurityPolicySubjectReviewInterface interface { - Create(*v1.PodSecurityPolicySubjectReview) (*v1.PodSecurityPolicySubjectReview, error) - PodSecurityPolicySubjectReviewExpansion -} - -// podSecurityPolicySubjectReviews implements PodSecurityPolicySubjectReviewInterface -type podSecurityPolicySubjectReviews struct { - client rest.Interface - ns string -} - -// newPodSecurityPolicySubjectReviews returns a PodSecurityPolicySubjectReviews -func newPodSecurityPolicySubjectReviews(c *SecurityV1Client, namespace string) *podSecurityPolicySubjectReviews { - return &podSecurityPolicySubjectReviews{ - client: c.RESTClient(), - ns: namespace, - } -} - -// Create takes the representation of a podSecurityPolicySubjectReview and creates it. Returns the server's representation of the podSecurityPolicySubjectReview, and an error, if there is any. -func (c *podSecurityPolicySubjectReviews) Create(podSecurityPolicySubjectReview *v1.PodSecurityPolicySubjectReview) (result *v1.PodSecurityPolicySubjectReview, err error) { - result = &v1.PodSecurityPolicySubjectReview{} - err = c.client.Post(). - Namespace(c.ns). - Resource("podsecuritypolicysubjectreviews"). - Body(podSecurityPolicySubjectReview). - Do(). - Into(result) - return -} diff --git a/vendor/github.com/openshift/client-go/security/clientset/versioned/typed/security/v1/rangeallocation.go b/vendor/github.com/openshift/client-go/security/clientset/versioned/typed/security/v1/rangeallocation.go deleted file mode 100644 index b42c320df3273bdca436d6281d8e077c68e781be..0000000000000000000000000000000000000000 --- a/vendor/github.com/openshift/client-go/security/clientset/versioned/typed/security/v1/rangeallocation.go +++ /dev/null @@ -1,148 +0,0 @@ -// Code generated by client-gen. DO NOT EDIT. - -package v1 - -import ( - "time" - - v1 "github.com/openshift/api/security/v1" - scheme "github.com/openshift/client-go/security/clientset/versioned/scheme" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - types "k8s.io/apimachinery/pkg/types" - watch "k8s.io/apimachinery/pkg/watch" - rest "k8s.io/client-go/rest" -) - -// RangeAllocationsGetter has a method to return a RangeAllocationInterface. -// A group's client should implement this interface. -type RangeAllocationsGetter interface { - RangeAllocations() RangeAllocationInterface -} - -// RangeAllocationInterface has methods to work with RangeAllocation resources. 
-type RangeAllocationInterface interface { - Create(*v1.RangeAllocation) (*v1.RangeAllocation, error) - Update(*v1.RangeAllocation) (*v1.RangeAllocation, error) - Delete(name string, options *metav1.DeleteOptions) error - DeleteCollection(options *metav1.DeleteOptions, listOptions metav1.ListOptions) error - Get(name string, options metav1.GetOptions) (*v1.RangeAllocation, error) - List(opts metav1.ListOptions) (*v1.RangeAllocationList, error) - Watch(opts metav1.ListOptions) (watch.Interface, error) - Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1.RangeAllocation, err error) - RangeAllocationExpansion -} - -// rangeAllocations implements RangeAllocationInterface -type rangeAllocations struct { - client rest.Interface -} - -// newRangeAllocations returns a RangeAllocations -func newRangeAllocations(c *SecurityV1Client) *rangeAllocations { - return &rangeAllocations{ - client: c.RESTClient(), - } -} - -// Get takes name of the rangeAllocation, and returns the corresponding rangeAllocation object, and an error if there is any. -func (c *rangeAllocations) Get(name string, options metav1.GetOptions) (result *v1.RangeAllocation, err error) { - result = &v1.RangeAllocation{} - err = c.client.Get(). - Resource("rangeallocations"). - Name(name). - VersionedParams(&options, scheme.ParameterCodec). - Do(). - Into(result) - return -} - -// List takes label and field selectors, and returns the list of RangeAllocations that match those selectors. -func (c *rangeAllocations) List(opts metav1.ListOptions) (result *v1.RangeAllocationList, err error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } - result = &v1.RangeAllocationList{} - err = c.client.Get(). - Resource("rangeallocations"). - VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). - Do(). - Into(result) - return -} - -// Watch returns a watch.Interface that watches the requested rangeAllocations. -func (c *rangeAllocations) Watch(opts metav1.ListOptions) (watch.Interface, error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } - opts.Watch = true - return c.client.Get(). - Resource("rangeallocations"). - VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). - Watch() -} - -// Create takes the representation of a rangeAllocation and creates it. Returns the server's representation of the rangeAllocation, and an error, if there is any. -func (c *rangeAllocations) Create(rangeAllocation *v1.RangeAllocation) (result *v1.RangeAllocation, err error) { - result = &v1.RangeAllocation{} - err = c.client.Post(). - Resource("rangeallocations"). - Body(rangeAllocation). - Do(). - Into(result) - return -} - -// Update takes the representation of a rangeAllocation and updates it. Returns the server's representation of the rangeAllocation, and an error, if there is any. -func (c *rangeAllocations) Update(rangeAllocation *v1.RangeAllocation) (result *v1.RangeAllocation, err error) { - result = &v1.RangeAllocation{} - err = c.client.Put(). - Resource("rangeallocations"). - Name(rangeAllocation.Name). - Body(rangeAllocation). - Do(). - Into(result) - return -} - -// Delete takes name of the rangeAllocation and deletes it. Returns an error if one occurs. -func (c *rangeAllocations) Delete(name string, options *metav1.DeleteOptions) error { - return c.client.Delete(). - Resource("rangeallocations"). - Name(name). - Body(options). - Do(). 
- Error() -} - -// DeleteCollection deletes a collection of objects. -func (c *rangeAllocations) DeleteCollection(options *metav1.DeleteOptions, listOptions metav1.ListOptions) error { - var timeout time.Duration - if listOptions.TimeoutSeconds != nil { - timeout = time.Duration(*listOptions.TimeoutSeconds) * time.Second - } - return c.client.Delete(). - Resource("rangeallocations"). - VersionedParams(&listOptions, scheme.ParameterCodec). - Timeout(timeout). - Body(options). - Do(). - Error() -} - -// Patch applies the patch and returns the patched rangeAllocation. -func (c *rangeAllocations) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1.RangeAllocation, err error) { - result = &v1.RangeAllocation{} - err = c.client.Patch(pt). - Resource("rangeallocations"). - SubResource(subresources...). - Name(name). - Body(data). - Do(). - Into(result) - return -} diff --git a/vendor/github.com/openshift/client-go/security/clientset/versioned/typed/security/v1/security_client.go b/vendor/github.com/openshift/client-go/security/clientset/versioned/typed/security/v1/security_client.go deleted file mode 100644 index 6ec1a5707aaeca2111c0ec406b3b94739d76c2f5..0000000000000000000000000000000000000000 --- a/vendor/github.com/openshift/client-go/security/clientset/versioned/typed/security/v1/security_client.go +++ /dev/null @@ -1,93 +0,0 @@ -// Code generated by client-gen. DO NOT EDIT. - -package v1 - -import ( - v1 "github.com/openshift/api/security/v1" - "github.com/openshift/client-go/security/clientset/versioned/scheme" - rest "k8s.io/client-go/rest" -) - -type SecurityV1Interface interface { - RESTClient() rest.Interface - PodSecurityPolicyReviewsGetter - PodSecurityPolicySelfSubjectReviewsGetter - PodSecurityPolicySubjectReviewsGetter - RangeAllocationsGetter - SecurityContextConstraintsGetter -} - -// SecurityV1Client is used to interact with features provided by the security.openshift.io group. -type SecurityV1Client struct { - restClient rest.Interface -} - -func (c *SecurityV1Client) PodSecurityPolicyReviews(namespace string) PodSecurityPolicyReviewInterface { - return newPodSecurityPolicyReviews(c, namespace) -} - -func (c *SecurityV1Client) PodSecurityPolicySelfSubjectReviews(namespace string) PodSecurityPolicySelfSubjectReviewInterface { - return newPodSecurityPolicySelfSubjectReviews(c, namespace) -} - -func (c *SecurityV1Client) PodSecurityPolicySubjectReviews(namespace string) PodSecurityPolicySubjectReviewInterface { - return newPodSecurityPolicySubjectReviews(c, namespace) -} - -func (c *SecurityV1Client) RangeAllocations() RangeAllocationInterface { - return newRangeAllocations(c) -} - -func (c *SecurityV1Client) SecurityContextConstraints() SecurityContextConstraintsInterface { - return newSecurityContextConstraints(c) -} - -// NewForConfig creates a new SecurityV1Client for the given config. -func NewForConfig(c *rest.Config) (*SecurityV1Client, error) { - config := *c - if err := setConfigDefaults(&config); err != nil { - return nil, err - } - client, err := rest.RESTClientFor(&config) - if err != nil { - return nil, err - } - return &SecurityV1Client{client}, nil -} - -// NewForConfigOrDie creates a new SecurityV1Client for the given config and -// panics if there is an error in the config. -func NewForConfigOrDie(c *rest.Config) *SecurityV1Client { - client, err := NewForConfig(c) - if err != nil { - panic(err) - } - return client -} - -// New creates a new SecurityV1Client for the given RESTClient. 
-func New(c rest.Interface) *SecurityV1Client { - return &SecurityV1Client{c} -} - -func setConfigDefaults(config *rest.Config) error { - gv := v1.SchemeGroupVersion - config.GroupVersion = &gv - config.APIPath = "/apis" - config.NegotiatedSerializer = scheme.Codecs.WithoutConversion() - - if config.UserAgent == "" { - config.UserAgent = rest.DefaultKubernetesUserAgent() - } - - return nil -} - -// RESTClient returns a RESTClient that is used to communicate -// with API server by this client implementation. -func (c *SecurityV1Client) RESTClient() rest.Interface { - if c == nil { - return nil - } - return c.restClient -} diff --git a/vendor/github.com/openshift/client-go/security/clientset/versioned/typed/security/v1/securitycontextconstraints.go b/vendor/github.com/openshift/client-go/security/clientset/versioned/typed/security/v1/securitycontextconstraints.go deleted file mode 100644 index 593b4976f9716fd550341ebe1cfd425f6f015527..0000000000000000000000000000000000000000 --- a/vendor/github.com/openshift/client-go/security/clientset/versioned/typed/security/v1/securitycontextconstraints.go +++ /dev/null @@ -1,148 +0,0 @@ -// Code generated by client-gen. DO NOT EDIT. - -package v1 - -import ( - "time" - - v1 "github.com/openshift/api/security/v1" - scheme "github.com/openshift/client-go/security/clientset/versioned/scheme" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - types "k8s.io/apimachinery/pkg/types" - watch "k8s.io/apimachinery/pkg/watch" - rest "k8s.io/client-go/rest" -) - -// SecurityContextConstraintsGetter has a method to return a SecurityContextConstraintsInterface. -// A group's client should implement this interface. -type SecurityContextConstraintsGetter interface { - SecurityContextConstraints() SecurityContextConstraintsInterface -} - -// SecurityContextConstraintsInterface has methods to work with SecurityContextConstraints resources. -type SecurityContextConstraintsInterface interface { - Create(*v1.SecurityContextConstraints) (*v1.SecurityContextConstraints, error) - Update(*v1.SecurityContextConstraints) (*v1.SecurityContextConstraints, error) - Delete(name string, options *metav1.DeleteOptions) error - DeleteCollection(options *metav1.DeleteOptions, listOptions metav1.ListOptions) error - Get(name string, options metav1.GetOptions) (*v1.SecurityContextConstraints, error) - List(opts metav1.ListOptions) (*v1.SecurityContextConstraintsList, error) - Watch(opts metav1.ListOptions) (watch.Interface, error) - Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1.SecurityContextConstraints, err error) - SecurityContextConstraintsExpansion -} - -// securityContextConstraints implements SecurityContextConstraintsInterface -type securityContextConstraints struct { - client rest.Interface -} - -// newSecurityContextConstraints returns a SecurityContextConstraints -func newSecurityContextConstraints(c *SecurityV1Client) *securityContextConstraints { - return &securityContextConstraints{ - client: c.RESTClient(), - } -} - -// Get takes name of the securityContextConstraints, and returns the corresponding securityContextConstraints object, and an error if there is any. -func (c *securityContextConstraints) Get(name string, options metav1.GetOptions) (result *v1.SecurityContextConstraints, err error) { - result = &v1.SecurityContextConstraints{} - err = c.client.Get(). - Resource("securitycontextconstraints"). - Name(name). - VersionedParams(&options, scheme.ParameterCodec). - Do(). 
- Into(result) - return -} - -// List takes label and field selectors, and returns the list of SecurityContextConstraints that match those selectors. -func (c *securityContextConstraints) List(opts metav1.ListOptions) (result *v1.SecurityContextConstraintsList, err error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } - result = &v1.SecurityContextConstraintsList{} - err = c.client.Get(). - Resource("securitycontextconstraints"). - VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). - Do(). - Into(result) - return -} - -// Watch returns a watch.Interface that watches the requested securityContextConstraints. -func (c *securityContextConstraints) Watch(opts metav1.ListOptions) (watch.Interface, error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } - opts.Watch = true - return c.client.Get(). - Resource("securitycontextconstraints"). - VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). - Watch() -} - -// Create takes the representation of a securityContextConstraints and creates it. Returns the server's representation of the securityContextConstraints, and an error, if there is any. -func (c *securityContextConstraints) Create(securityContextConstraints *v1.SecurityContextConstraints) (result *v1.SecurityContextConstraints, err error) { - result = &v1.SecurityContextConstraints{} - err = c.client.Post(). - Resource("securitycontextconstraints"). - Body(securityContextConstraints). - Do(). - Into(result) - return -} - -// Update takes the representation of a securityContextConstraints and updates it. Returns the server's representation of the securityContextConstraints, and an error, if there is any. -func (c *securityContextConstraints) Update(securityContextConstraints *v1.SecurityContextConstraints) (result *v1.SecurityContextConstraints, err error) { - result = &v1.SecurityContextConstraints{} - err = c.client.Put(). - Resource("securitycontextconstraints"). - Name(securityContextConstraints.Name). - Body(securityContextConstraints). - Do(). - Into(result) - return -} - -// Delete takes name of the securityContextConstraints and deletes it. Returns an error if one occurs. -func (c *securityContextConstraints) Delete(name string, options *metav1.DeleteOptions) error { - return c.client.Delete(). - Resource("securitycontextconstraints"). - Name(name). - Body(options). - Do(). - Error() -} - -// DeleteCollection deletes a collection of objects. -func (c *securityContextConstraints) DeleteCollection(options *metav1.DeleteOptions, listOptions metav1.ListOptions) error { - var timeout time.Duration - if listOptions.TimeoutSeconds != nil { - timeout = time.Duration(*listOptions.TimeoutSeconds) * time.Second - } - return c.client.Delete(). - Resource("securitycontextconstraints"). - VersionedParams(&listOptions, scheme.ParameterCodec). - Timeout(timeout). - Body(options). - Do(). - Error() -} - -// Patch applies the patch and returns the patched securityContextConstraints. -func (c *securityContextConstraints) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1.SecurityContextConstraints, err error) { - result = &v1.SecurityContextConstraints{} - err = c.client.Patch(pt). - Resource("securitycontextconstraints"). - SubResource(subresources...). - Name(name). - Body(data). - Do(). 
- Into(result) - return -} diff --git a/vendor/github.com/openshift/cluster-version-operator/lib/resourceapply/apiext.go b/vendor/github.com/openshift/cluster-version-operator/lib/resourceapply/apiext.go deleted file mode 100644 index 5ef3a78720fac582167ecaca75b575b84fefdea0..0000000000000000000000000000000000000000 --- a/vendor/github.com/openshift/cluster-version-operator/lib/resourceapply/apiext.go +++ /dev/null @@ -1,60 +0,0 @@ -package resourceapply - -import ( - "github.com/openshift/cluster-version-operator/lib/resourcemerge" - apiextv1beta1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1" - apiextclientv1beta1 "k8s.io/apiextensions-apiserver/pkg/client/clientset/clientset/typed/apiextensions/v1beta1" - apiextlistersv1beta1 "k8s.io/apiextensions-apiserver/pkg/client/listers/apiextensions/v1beta1" - apierrors "k8s.io/apimachinery/pkg/api/errors" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/utils/pointer" -) - -func ApplyCustomResourceDefinition(client apiextclientv1beta1.CustomResourceDefinitionsGetter, required *apiextv1beta1.CustomResourceDefinition) (*apiextv1beta1.CustomResourceDefinition, bool, error) { - existing, err := client.CustomResourceDefinitions().Get(required.Name, metav1.GetOptions{}) - if apierrors.IsNotFound(err) { - actual, err := client.CustomResourceDefinitions().Create(required) - return actual, true, err - } - if err != nil { - return nil, false, err - } - // if we only create this resource, we have no need to continue further - if IsCreateOnly(required) { - return nil, false, nil - } - - modified := pointer.BoolPtr(false) - resourcemerge.EnsureCustomResourceDefinition(modified, existing, *required) - if !*modified { - return existing, false, nil - } - - actual, err := client.CustomResourceDefinitions().Update(existing) - return actual, true, err -} - -func ApplyCustomResourceDefinitionFromCache(lister apiextlistersv1beta1.CustomResourceDefinitionLister, client apiextclientv1beta1.CustomResourceDefinitionsGetter, required *apiextv1beta1.CustomResourceDefinition) (*apiextv1beta1.CustomResourceDefinition, bool, error) { - existing, err := lister.Get(required.Name) - if apierrors.IsNotFound(err) { - actual, err := client.CustomResourceDefinitions().Create(required) - return actual, true, err - } - if err != nil { - return nil, false, err - } - // if we only create this resource, we have no need to continue further - if IsCreateOnly(required) { - return nil, false, nil - } - - existing = existing.DeepCopy() - modified := pointer.BoolPtr(false) - resourcemerge.EnsureCustomResourceDefinition(modified, existing, *required) - if !*modified { - return existing, false, nil - } - - actual, err := client.CustomResourceDefinitions().Update(existing) - return actual, true, err -} diff --git a/vendor/github.com/openshift/cluster-version-operator/lib/resourceapply/apireg.go b/vendor/github.com/openshift/cluster-version-operator/lib/resourceapply/apireg.go deleted file mode 100644 index dc87e7e1e041e0fcff5333056d81e1c70fbc2e2d..0000000000000000000000000000000000000000 --- a/vendor/github.com/openshift/cluster-version-operator/lib/resourceapply/apireg.go +++ /dev/null @@ -1,34 +0,0 @@ -package resourceapply - -import ( - "github.com/openshift/cluster-version-operator/lib/resourcemerge" - apierrors "k8s.io/apimachinery/pkg/api/errors" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - apiregv1 "k8s.io/kube-aggregator/pkg/apis/apiregistration/v1" - apiregclientv1 "k8s.io/kube-aggregator/pkg/client/clientset_generated/clientset/typed/apiregistration/v1" - 
"k8s.io/utils/pointer" -) - -func ApplyAPIService(client apiregclientv1.APIServicesGetter, required *apiregv1.APIService) (*apiregv1.APIService, bool, error) { - existing, err := client.APIServices().Get(required.Name, metav1.GetOptions{}) - if apierrors.IsNotFound(err) { - actual, err := client.APIServices().Create(required) - return actual, true, err - } - if err != nil { - return nil, false, err - } - // if we only create this resource, we have no need to continue further - if IsCreateOnly(required) { - return nil, false, nil - } - - modified := pointer.BoolPtr(false) - resourcemerge.EnsureAPIService(modified, existing, *required) - if !*modified { - return existing, false, nil - } - - actual, err := client.APIServices().Update(existing) - return actual, true, err -} diff --git a/vendor/github.com/openshift/cluster-version-operator/lib/resourceapply/apps.go b/vendor/github.com/openshift/cluster-version-operator/lib/resourceapply/apps.go deleted file mode 100644 index 334eb208096f102b01b7fabcb3241170ee6f0e19..0000000000000000000000000000000000000000 --- a/vendor/github.com/openshift/cluster-version-operator/lib/resourceapply/apps.go +++ /dev/null @@ -1,113 +0,0 @@ -package resourceapply - -import ( - "github.com/openshift/cluster-version-operator/lib/resourcemerge" - appsv1 "k8s.io/api/apps/v1" - apierrors "k8s.io/apimachinery/pkg/api/errors" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - appsclientv1 "k8s.io/client-go/kubernetes/typed/apps/v1" - appslisterv1 "k8s.io/client-go/listers/apps/v1" - "k8s.io/utils/pointer" -) - -// ApplyDeployment applies the required deployment to the cluster. -func ApplyDeployment(client appsclientv1.DeploymentsGetter, required *appsv1.Deployment) (*appsv1.Deployment, bool, error) { - existing, err := client.Deployments(required.Namespace).Get(required.Name, metav1.GetOptions{}) - if apierrors.IsNotFound(err) { - actual, err := client.Deployments(required.Namespace).Create(required) - return actual, true, err - } - if err != nil { - return nil, false, err - } - // if we only create this resource, we have no need to continue further - if IsCreateOnly(required) { - return nil, false, nil - } - - modified := pointer.BoolPtr(false) - resourcemerge.EnsureDeployment(modified, existing, *required) - if !*modified { - return existing, false, nil - } - - actual, err := client.Deployments(required.Namespace).Update(existing) - return actual, true, err -} - -// ApplyDeploymentFromCache applies the required deployment to the cluster. -func ApplyDeploymentFromCache(lister appslisterv1.DeploymentLister, client appsclientv1.DeploymentsGetter, required *appsv1.Deployment) (*appsv1.Deployment, bool, error) { - existing, err := lister.Deployments(required.Namespace).Get(required.Name) - if apierrors.IsNotFound(err) { - actual, err := client.Deployments(required.Namespace).Create(required) - return actual, true, err - } - if err != nil { - return nil, false, err - } - // if we only create this resource, we have no need to continue further - if IsCreateOnly(required) { - return nil, false, nil - } - - existing = existing.DeepCopy() - modified := pointer.BoolPtr(false) - resourcemerge.EnsureDeployment(modified, existing, *required) - if !*modified { - return existing, false, nil - } - - actual, err := client.Deployments(required.Namespace).Update(existing) - return actual, true, err -} - -// ApplyDaemonSet applies the required daemonset to the cluster. 
-func ApplyDaemonSet(client appsclientv1.DaemonSetsGetter, required *appsv1.DaemonSet) (*appsv1.DaemonSet, bool, error) { - existing, err := client.DaemonSets(required.Namespace).Get(required.Name, metav1.GetOptions{}) - if apierrors.IsNotFound(err) { - actual, err := client.DaemonSets(required.Namespace).Create(required) - return actual, true, err - } - if err != nil { - return nil, false, err - } - // if we only create this resource, we have no need to continue further - if IsCreateOnly(required) { - return nil, false, nil - } - - modified := pointer.BoolPtr(false) - resourcemerge.EnsureDaemonSet(modified, existing, *required) - if !*modified { - return existing, false, nil - } - - actual, err := client.DaemonSets(required.Namespace).Update(existing) - return actual, true, err -} - -// ApplyDaemonSetFromCache applies the required deployment to the cluster. -func ApplyDaemonSetFromCache(lister appslisterv1.DaemonSetLister, client appsclientv1.DaemonSetsGetter, required *appsv1.DaemonSet) (*appsv1.DaemonSet, bool, error) { - existing, err := lister.DaemonSets(required.Namespace).Get(required.Name) - if apierrors.IsNotFound(err) { - actual, err := client.DaemonSets(required.Namespace).Create(required) - return actual, true, err - } - if err != nil { - return nil, false, err - } - // if we only create this resource, we have no need to continue further - if IsCreateOnly(required) { - return nil, false, nil - } - - existing = existing.DeepCopy() - modified := pointer.BoolPtr(false) - resourcemerge.EnsureDaemonSet(modified, existing, *required) - if !*modified { - return existing, false, nil - } - - actual, err := client.DaemonSets(required.Namespace).Update(existing) - return actual, true, err -} diff --git a/vendor/github.com/openshift/cluster-version-operator/lib/resourceapply/batch.go b/vendor/github.com/openshift/cluster-version-operator/lib/resourceapply/batch.go deleted file mode 100644 index 26fee6dba03e67f391f8cdab28aaa72e4e96b70b..0000000000000000000000000000000000000000 --- a/vendor/github.com/openshift/cluster-version-operator/lib/resourceapply/batch.go +++ /dev/null @@ -1,35 +0,0 @@ -package resourceapply - -import ( - "github.com/openshift/cluster-version-operator/lib/resourcemerge" - batchv1 "k8s.io/api/batch/v1" - apierrors "k8s.io/apimachinery/pkg/api/errors" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - batchclientv1 "k8s.io/client-go/kubernetes/typed/batch/v1" - "k8s.io/utils/pointer" -) - -// ApplyJob applies the required Job to the cluster. 
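One deliberate detail in the *FromCache variants above (echoed by the "Don't want to mutate cache." comment in cv.go further down): objects handed out by listers alias the shared informer cache, so each helper calls DeepCopy() before letting resourcemerge mutate anything. A self-contained toy illustrating the hazard that avoids:

```go
package main

import "fmt"

// fakeDeployment stands in for a cached API object.
type fakeDeployment struct {
	Labels map[string]string
}

func (d *fakeDeployment) deepCopy() *fakeDeployment {
	out := &fakeDeployment{Labels: make(map[string]string, len(d.Labels))}
	for k, v := range d.Labels {
		out.Labels[k] = v
	}
	return out
}

func main() {
	// Pretend this pointer came from a lister: it is owned by the cache.
	cached := &fakeDeployment{Labels: map[string]string{"app": "cvo"}}

	existing := cached.deepCopy() // what the *FromCache helpers do first
	existing.Labels["merged"] = "true"

	fmt.Println(cached.Labels)   // map[app:cvo] — the cache stays pristine
	fmt.Println(existing.Labels) // map[app:cvo merged:true]
}
```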
-func ApplyJob(client batchclientv1.JobsGetter, required *batchv1.Job) (*batchv1.Job, bool, error) { - existing, err := client.Jobs(required.Namespace).Get(required.Name, metav1.GetOptions{}) - if apierrors.IsNotFound(err) { - actual, err := client.Jobs(required.Namespace).Create(required) - return actual, true, err - } - if err != nil { - return nil, false, err - } - // if we only create this resource, we have no need to continue further - if IsCreateOnly(required) { - return nil, false, nil - } - - modified := pointer.BoolPtr(false) - resourcemerge.EnsureJob(modified, existing, *required) - if !*modified { - return existing, false, nil - } - - actual, err := client.Jobs(required.Namespace).Update(existing) - return actual, true, err -} diff --git a/vendor/github.com/openshift/cluster-version-operator/lib/resourceapply/core.go b/vendor/github.com/openshift/cluster-version-operator/lib/resourceapply/core.go deleted file mode 100644 index b5acd27f5d4921130524954a68106f256a7d5c14..0000000000000000000000000000000000000000 --- a/vendor/github.com/openshift/cluster-version-operator/lib/resourceapply/core.go +++ /dev/null @@ -1,118 +0,0 @@ -package resourceapply - -import ( - corev1 "k8s.io/api/core/v1" - "k8s.io/apimachinery/pkg/api/equality" - apierrors "k8s.io/apimachinery/pkg/api/errors" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - coreclientv1 "k8s.io/client-go/kubernetes/typed/core/v1" - "k8s.io/utils/pointer" - - "github.com/openshift/cluster-version-operator/lib/resourcemerge" -) - -// ApplyNamespace merges objectmeta, does not worry about anything else -func ApplyNamespace(client coreclientv1.NamespacesGetter, required *corev1.Namespace) (*corev1.Namespace, bool, error) { - existing, err := client.Namespaces().Get(required.Name, metav1.GetOptions{}) - if apierrors.IsNotFound(err) { - actual, err := client.Namespaces().Create(required) - return actual, true, err - } - if err != nil { - return nil, false, err - } - // if we only create this resource, we have no need to continue further - if IsCreateOnly(required) { - return nil, false, nil - } - - modified := pointer.BoolPtr(false) - resourcemerge.EnsureObjectMeta(modified, &existing.ObjectMeta, required.ObjectMeta) - if !*modified { - return existing, false, nil - } - - actual, err := client.Namespaces().Update(existing) - return actual, true, err -} - -// ApplyService merges objectmeta and requires -// TODO, since this cannot determine whether changes are due to legitimate actors (api server) or illegitimate ones (users), we cannot update -// TODO I've special cased the selector for now -func ApplyService(client coreclientv1.ServicesGetter, required *corev1.Service) (*corev1.Service, bool, error) { - existing, err := client.Services(required.Namespace).Get(required.Name, metav1.GetOptions{}) - if apierrors.IsNotFound(err) { - actual, err := client.Services(required.Namespace).Create(required) - return actual, true, err - } - if err != nil { - return nil, false, err - } - // if we only create this resource, we have no need to continue further - if IsCreateOnly(required) { - return nil, false, nil - } - - modified := pointer.BoolPtr(false) - resourcemerge.EnsureObjectMeta(modified, &existing.ObjectMeta, required.ObjectMeta) - selectorSame := equality.Semantic.DeepEqual(existing.Spec.Selector, required.Spec.Selector) - typeSame := equality.Semantic.DeepEqual(existing.Spec.Type, required.Spec.Type) - if selectorSame && typeSame && !*modified { - return nil, false, nil - } - existing.Spec.Selector = required.Spec.Selector - 
existing.Spec.Type = required.Spec.Type // if this is different, the update will fail. Status will indicate it.
-
-	actual, err := client.Services(required.Namespace).Update(existing)
-	return actual, true, err
-}
-
-// ApplyServiceAccount applies the required serviceaccount to the cluster.
-func ApplyServiceAccount(client coreclientv1.ServiceAccountsGetter, required *corev1.ServiceAccount) (*corev1.ServiceAccount, bool, error) {
-	existing, err := client.ServiceAccounts(required.Namespace).Get(required.Name, metav1.GetOptions{})
-	if apierrors.IsNotFound(err) {
-		actual, err := client.ServiceAccounts(required.Namespace).Create(required)
-		return actual, true, err
-	}
-	if err != nil {
-		return nil, false, err
-	}
-	// if we only create this resource, we have no need to continue further
-	if IsCreateOnly(required) {
-		return nil, false, nil
-	}
-
-	modified := pointer.BoolPtr(false)
-	resourcemerge.EnsureObjectMeta(modified, &existing.ObjectMeta, required.ObjectMeta)
-	if !*modified {
-		return existing, false, nil
-	}
-
-	actual, err := client.ServiceAccounts(required.Namespace).Update(existing)
-	return actual, true, err
-}
-
-// ApplyConfigMap applies the required configmap to the cluster.
-func ApplyConfigMap(client coreclientv1.ConfigMapsGetter, required *corev1.ConfigMap) (*corev1.ConfigMap, bool, error) {
-	existing, err := client.ConfigMaps(required.Namespace).Get(required.Name, metav1.GetOptions{})
-	if apierrors.IsNotFound(err) {
-		actual, err := client.ConfigMaps(required.Namespace).Create(required)
-		return actual, true, err
-	}
-	if err != nil {
-		return nil, false, err
-	}
-	// if we only create this resource, we have no need to continue further
-	if IsCreateOnly(required) {
-		return nil, false, nil
-	}
-
-	modified := pointer.BoolPtr(false)
-	resourcemerge.EnsureConfigMap(modified, existing, *required)
-	if !*modified {
-		return existing, false, nil
-	}
-
-	actual, err := client.ConfigMaps(required.Namespace).Update(existing)
-	return actual, true, err
-}
diff --git a/vendor/github.com/openshift/cluster-version-operator/lib/resourceapply/cv.go b/vendor/github.com/openshift/cluster-version-operator/lib/resourceapply/cv.go
deleted file mode 100644
index f671698ddbeedd5ac60eab7ea7d74cf1472cbfaf..0000000000000000000000000000000000000000
--- a/vendor/github.com/openshift/cluster-version-operator/lib/resourceapply/cv.go
+++ /dev/null
@@ -1,61 +0,0 @@
-package resourceapply
-
-import (
-	configv1 "github.com/openshift/api/config/v1"
-	configclientv1 "github.com/openshift/client-go/config/clientset/versioned/typed/config/v1"
-	configlistersv1 "github.com/openshift/client-go/config/listers/config/v1"
-	"github.com/openshift/cluster-version-operator/lib/resourcemerge"
-	"k8s.io/apimachinery/pkg/api/errors"
-	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
-	"k8s.io/utils/pointer"
-)
-
-func ApplyClusterVersion(client configclientv1.ClusterVersionsGetter, required *configv1.ClusterVersion) (*configv1.ClusterVersion, bool, error) {
-	existing, err := client.ClusterVersions().Get(required.Name, metav1.GetOptions{})
-	if errors.IsNotFound(err) {
-		actual, err := client.ClusterVersions().Create(required)
-		return actual, true, err
-	}
-	if err != nil {
-		return nil, false, err
-	}
-	// if we only create this resource, we have no need to continue further
-	if IsCreateOnly(required) {
-		return nil, false, nil
-	}
-
-	modified := pointer.BoolPtr(false)
-	resourcemerge.EnsureClusterVersion(modified, existing, *required)
-	if !*modified {
-		return existing, false, nil
-	}
-
-	actual, err :=
client.ClusterVersions().Update(existing) - return actual, true, err -} - -func ApplyClusterVersionFromCache(lister configlistersv1.ClusterVersionLister, client configclientv1.ClusterVersionsGetter, required *configv1.ClusterVersion) (*configv1.ClusterVersion, bool, error) { - obj, err := lister.Get(required.Name) - if errors.IsNotFound(err) { - actual, err := client.ClusterVersions().Create(required) - return actual, true, err - } - if err != nil { - return nil, false, err - } - // if we only create this resource, we have no need to continue further - if IsCreateOnly(required) { - return nil, false, nil - } - - // Don't want to mutate cache. - existing := obj.DeepCopy() - modified := pointer.BoolPtr(false) - resourcemerge.EnsureClusterVersion(modified, existing, *required) - if !*modified { - return existing, false, nil - } - - actual, err := client.ClusterVersions().Update(existing) - return actual, true, err -} diff --git a/vendor/github.com/openshift/cluster-version-operator/lib/resourceapply/interface.go b/vendor/github.com/openshift/cluster-version-operator/lib/resourceapply/interface.go deleted file mode 100644 index 2f648602f2902a3cb9f29eb6217136bcc7ecd646..0000000000000000000000000000000000000000 --- a/vendor/github.com/openshift/cluster-version-operator/lib/resourceapply/interface.go +++ /dev/null @@ -1,18 +0,0 @@ -package resourceapply - -import ( - "strings" - - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" -) - -// CreateOnlyAnnotation means that this resource should be created if it does not exist, but should not be updated -// if it already exists. It is uniformly respected across all resources, but the first known use-cases are for -// empty config.openshift.io and initial low-level operator resources. -// Set .metadata.annotations["release.openshift.io/create-only"]="true" to have a create-only resource. -const CreateOnlyAnnotation = "release.openshift.io/create-only" - -// IsCreateOnly takes metadata and returns true if the resource should only be created, not updated. -func IsCreateOnly(metadata metav1.Object) bool { - return strings.EqualFold(metadata.GetAnnotations()[CreateOnlyAnnotation], "true") -} diff --git a/vendor/github.com/openshift/cluster-version-operator/lib/resourceapply/rbac.go b/vendor/github.com/openshift/cluster-version-operator/lib/resourceapply/rbac.go deleted file mode 100644 index d7b47a003367ac310710ca9a92399570e17153d5..0000000000000000000000000000000000000000 --- a/vendor/github.com/openshift/cluster-version-operator/lib/resourceapply/rbac.go +++ /dev/null @@ -1,110 +0,0 @@ -package resourceapply - -import ( - "github.com/openshift/cluster-version-operator/lib/resourcemerge" - rbacv1 "k8s.io/api/rbac/v1" - apierrors "k8s.io/apimachinery/pkg/api/errors" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - rbacclientv1 "k8s.io/client-go/kubernetes/typed/rbac/v1" - "k8s.io/utils/pointer" -) - -// ApplyClusterRoleBinding applies the required clusterrolebinding to the cluster. 
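interface.go above defines the create-only contract that every Apply* helper respects: an annotated resource is created when absent and never touched again. A sketch of opting a ConfigMap in; all names and the coreClient variable (a coreclientv1.ConfigMapsGetter) are hypothetical:

// Sketch only; coreClient and all names are hypothetical.
cm := &corev1.ConfigMap{
	ObjectMeta: metav1.ObjectMeta{
		Name:      "initial-config",
		Namespace: "openshift-example",
		Annotations: map[string]string{
			// created once, then left alone on every later sync
			resourceapply.CreateOnlyAnnotation: "true",
		},
	},
	Data: map[string]string{"seed": "value"},
}
actual, changed, err := resourceapply.ApplyConfigMap(coreClient, cm)
if err == nil && changed {
	// first sync: the object was just created
	klog.Infof("created configmap %s/%s", actual.Namespace, actual.Name)
}
// later syncs return (nil, false, nil): IsCreateOnly short-circuits before any merge,
// so manual edits to the live object are never stomped.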
-func ApplyClusterRoleBinding(client rbacclientv1.ClusterRoleBindingsGetter, required *rbacv1.ClusterRoleBinding) (*rbacv1.ClusterRoleBinding, bool, error) {
-	existing, err := client.ClusterRoleBindings().Get(required.Name, metav1.GetOptions{})
-	if apierrors.IsNotFound(err) {
-		actual, err := client.ClusterRoleBindings().Create(required)
-		return actual, true, err
-	}
-	if err != nil {
-		return nil, false, err
-	}
-	// if we only create this resource, we have no need to continue further
-	if IsCreateOnly(required) {
-		return nil, false, nil
-	}
-
-	modified := pointer.BoolPtr(false)
-	resourcemerge.EnsureClusterRoleBinding(modified, existing, *required)
-	if !*modified {
-		return existing, false, nil
-	}
-
-	actual, err := client.ClusterRoleBindings().Update(existing)
-	return actual, true, err
-}
-
-// ApplyClusterRole applies the required clusterrole to the cluster.
-func ApplyClusterRole(client rbacclientv1.ClusterRolesGetter, required *rbacv1.ClusterRole) (*rbacv1.ClusterRole, bool, error) {
-	existing, err := client.ClusterRoles().Get(required.Name, metav1.GetOptions{})
-	if apierrors.IsNotFound(err) {
-		actual, err := client.ClusterRoles().Create(required)
-		return actual, true, err
-	}
-	if err != nil {
-		return nil, false, err
-	}
-	// if we only create this resource, we have no need to continue further
-	if IsCreateOnly(required) {
-		return nil, false, nil
-	}
-
-	modified := pointer.BoolPtr(false)
-	resourcemerge.EnsureClusterRole(modified, existing, *required)
-	if !*modified {
-		return existing, false, nil
-	}
-
-	actual, err := client.ClusterRoles().Update(existing)
-	return actual, true, err
-}
-
-// ApplyRoleBinding applies the required rolebinding to the cluster.
-func ApplyRoleBinding(client rbacclientv1.RoleBindingsGetter, required *rbacv1.RoleBinding) (*rbacv1.RoleBinding, bool, error) {
-	existing, err := client.RoleBindings(required.Namespace).Get(required.Name, metav1.GetOptions{})
-	if apierrors.IsNotFound(err) {
-		actual, err := client.RoleBindings(required.Namespace).Create(required)
-		return actual, true, err
-	}
-	if err != nil {
-		return nil, false, err
-	}
-	// if we only create this resource, we have no need to continue further
-	if IsCreateOnly(required) {
-		return nil, false, nil
-	}
-
-	modified := pointer.BoolPtr(false)
-	resourcemerge.EnsureRoleBinding(modified, existing, *required)
-	if !*modified {
-		return existing, false, nil
-	}
-
-	actual, err := client.RoleBindings(required.Namespace).Update(existing)
-	return actual, true, err
-}
-
-// ApplyRole applies the required role to the cluster.
-func ApplyRole(client rbacclientv1.RolesGetter, required *rbacv1.Role) (*rbacv1.Role, bool, error) { - existing, err := client.Roles(required.Namespace).Get(required.Name, metav1.GetOptions{}) - if apierrors.IsNotFound(err) { - actual, err := client.Roles(required.Namespace).Create(required) - return actual, true, err - } - if err != nil { - return nil, false, err - } - // if we only create this resource, we have no need to continue further - if IsCreateOnly(required) { - return nil, false, nil - } - - modified := pointer.BoolPtr(false) - resourcemerge.EnsureRole(modified, existing, *required) - if !*modified { - return existing, false, nil - } - - actual, err := client.Roles(required.Namespace).Update(existing) - return actual, true, err -} diff --git a/vendor/github.com/openshift/cluster-version-operator/lib/resourceapply/security.go b/vendor/github.com/openshift/cluster-version-operator/lib/resourceapply/security.go deleted file mode 100644 index 39c574bf4c88b3e4e8224c0e0298915cae2aaa53..0000000000000000000000000000000000000000 --- a/vendor/github.com/openshift/cluster-version-operator/lib/resourceapply/security.go +++ /dev/null @@ -1,35 +0,0 @@ -package resourceapply - -import ( - securityv1 "github.com/openshift/api/security/v1" - securityclientv1 "github.com/openshift/client-go/security/clientset/versioned/typed/security/v1" - "github.com/openshift/cluster-version-operator/lib/resourcemerge" - apierrors "k8s.io/apimachinery/pkg/api/errors" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/utils/pointer" -) - -// ApplySecurityContextConstraints applies the required SecurityContextConstraints to the cluster. -func ApplySecurityContextConstraints(client securityclientv1.SecurityContextConstraintsGetter, required *securityv1.SecurityContextConstraints) (*securityv1.SecurityContextConstraints, bool, error) { - existing, err := client.SecurityContextConstraints().Get(required.Name, metav1.GetOptions{}) - if apierrors.IsNotFound(err) { - actual, err := client.SecurityContextConstraints().Create(required) - return actual, true, err - } - if err != nil { - return nil, false, err - } - // if we only create this resource, we have no need to continue further - if IsCreateOnly(required) { - return nil, false, nil - } - - modified := pointer.BoolPtr(false) - resourcemerge.EnsureObjectMeta(modified, &existing.ObjectMeta, required.ObjectMeta) - if !*modified { - return existing, false, nil - } - - actual, err := client.SecurityContextConstraints().Update(existing) - return actual, true, err -} diff --git a/vendor/github.com/openshift/cluster-version-operator/lib/resourcemerge/apiext.go b/vendor/github.com/openshift/cluster-version-operator/lib/resourcemerge/apiext.go deleted file mode 100644 index 32e4043f6268a148de2c5720bceb4e194b8e5aa5..0000000000000000000000000000000000000000 --- a/vendor/github.com/openshift/cluster-version-operator/lib/resourcemerge/apiext.go +++ /dev/null @@ -1,18 +0,0 @@ -package resourcemerge - -import ( - apiextv1beta1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1" - "k8s.io/apimachinery/pkg/api/equality" -) - -// EnsureCustomResourceDefinition ensures that the existing matches the required. -// modified is set to true when existing had to be updated with required. 
-func EnsureCustomResourceDefinition(modified *bool, existing *apiextv1beta1.CustomResourceDefinition, required apiextv1beta1.CustomResourceDefinition) { - EnsureObjectMeta(modified, &existing.ObjectMeta, required.ObjectMeta) - - // we stomp everything - if !equality.Semantic.DeepEqual(existing.Spec, required.Spec) { - *modified = true - existing.Spec = required.Spec - } -} diff --git a/vendor/github.com/openshift/cluster-version-operator/lib/resourcemerge/apireg.go b/vendor/github.com/openshift/cluster-version-operator/lib/resourcemerge/apireg.go deleted file mode 100644 index d1c58e8b5dc0dcbbb9467d8360e4c1ba319a50e6..0000000000000000000000000000000000000000 --- a/vendor/github.com/openshift/cluster-version-operator/lib/resourcemerge/apireg.go +++ /dev/null @@ -1,18 +0,0 @@ -package resourcemerge - -import ( - "k8s.io/apimachinery/pkg/api/equality" - apiregv1 "k8s.io/kube-aggregator/pkg/apis/apiregistration/v1" -) - -// EnsureAPIService ensures that the existing matches the required. -// modified is set to true when existing had to be updated with required. -func EnsureAPIService(modified *bool, existing *apiregv1.APIService, required apiregv1.APIService) { - EnsureObjectMeta(modified, &existing.ObjectMeta, required.ObjectMeta) - - // we stomp everything - if !equality.Semantic.DeepEqual(existing.Spec, required.Spec) { - *modified = true - existing.Spec = required.Spec - } -} diff --git a/vendor/github.com/openshift/cluster-version-operator/lib/resourcemerge/apps.go b/vendor/github.com/openshift/cluster-version-operator/lib/resourcemerge/apps.go deleted file mode 100644 index f7511d189a98e90db162145266b91f2adbc1849e..0000000000000000000000000000000000000000 --- a/vendor/github.com/openshift/cluster-version-operator/lib/resourcemerge/apps.go +++ /dev/null @@ -1,45 +0,0 @@ -package resourcemerge - -import ( - appsv1 "k8s.io/api/apps/v1" - "k8s.io/apimachinery/pkg/api/equality" -) - -// EnsureDeployment ensures that the existing matches the required. -// modified is set to true when existing had to be updated with required. -func EnsureDeployment(modified *bool, existing *appsv1.Deployment, required appsv1.Deployment) { - EnsureObjectMeta(modified, &existing.ObjectMeta, required.ObjectMeta) - - if required.Spec.Replicas != nil && required.Spec.Replicas != existing.Spec.Replicas { - *modified = true - existing.Spec.Replicas = required.Spec.Replicas - } - - if existing.Spec.Selector == nil && required.Spec.Selector != nil { - *modified = true - existing.Spec.Selector = required.Spec.Selector - } - if !equality.Semantic.DeepEqual(existing.Spec.Selector, required.Spec.Selector) { - *modified = true - existing.Spec.Selector = required.Spec.Selector - } - - ensurePodTemplateSpec(modified, &existing.Spec.Template, required.Spec.Template) -} - -// EnsureDaemonSet ensures that the existing matches the required. -// modified is set to true when existing had to be updated with required. 
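All resourcemerge.Ensure* functions follow the same convention: they mutate existing in place toward required and flip a caller-owned bool only when something actually changed, so callers can skip no-op writes. A usage sketch under those assumptions; existing, required, and client are hypothetical:

// Sketch only; existing, required, and client are hypothetical.
modified := pointer.BoolPtr(false)
resourcemerge.EnsureDeployment(modified, existing, *required) // mutates existing in place
if *modified {
	// only issue a write when the merge actually changed something
	if _, err := client.Deployments(existing.Namespace).Update(existing); err != nil {
		return err
	}
}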
-func EnsureDaemonSet(modified *bool, existing *appsv1.DaemonSet, required appsv1.DaemonSet) { - EnsureObjectMeta(modified, &existing.ObjectMeta, required.ObjectMeta) - - if existing.Spec.Selector == nil { - *modified = true - existing.Spec.Selector = required.Spec.Selector - } - if !equality.Semantic.DeepEqual(existing.Spec.Selector, required.Spec.Selector) { - *modified = true - existing.Spec.Selector = required.Spec.Selector - } - - ensurePodTemplateSpec(modified, &existing.Spec.Template, required.Spec.Template) -} diff --git a/vendor/github.com/openshift/cluster-version-operator/lib/resourcemerge/batch.go b/vendor/github.com/openshift/cluster-version-operator/lib/resourcemerge/batch.go deleted file mode 100644 index 81ab7bb98a833121fa79a91f30ee2536fdf49c7f..0000000000000000000000000000000000000000 --- a/vendor/github.com/openshift/cluster-version-operator/lib/resourcemerge/batch.go +++ /dev/null @@ -1,28 +0,0 @@ -package resourcemerge - -import ( - batchv1 "k8s.io/api/batch/v1" - "k8s.io/apimachinery/pkg/api/equality" -) - -// EnsureJob ensures that the existing matches the required. -// modified is set to true when existing had to be updated with required. -func EnsureJob(modified *bool, existing *batchv1.Job, required batchv1.Job) { - EnsureObjectMeta(modified, &existing.ObjectMeta, required.ObjectMeta) - - if existing.Spec.Selector == nil { - *modified = true - existing.Spec.Selector = required.Spec.Selector - } - if !equality.Semantic.DeepEqual(existing.Spec.Selector, required.Spec.Selector) { - *modified = true - existing.Spec.Selector = required.Spec.Selector - } - setInt32Ptr(modified, &existing.Spec.Parallelism, required.Spec.Parallelism) - setInt32Ptr(modified, &existing.Spec.Completions, required.Spec.Completions) - setInt64Ptr(modified, &existing.Spec.ActiveDeadlineSeconds, required.Spec.ActiveDeadlineSeconds) - setInt32Ptr(modified, &existing.Spec.BackoffLimit, required.Spec.BackoffLimit) - setBoolPtr(modified, &existing.Spec.ManualSelector, required.Spec.ManualSelector) - - ensurePodTemplateSpec(modified, &existing.Spec.Template, required.Spec.Template) -} diff --git a/vendor/github.com/openshift/cluster-version-operator/lib/resourcemerge/core.go b/vendor/github.com/openshift/cluster-version-operator/lib/resourcemerge/core.go deleted file mode 100644 index 0c77b3c33c86d02060e03facb76661087796238f..0000000000000000000000000000000000000000 --- a/vendor/github.com/openshift/cluster-version-operator/lib/resourcemerge/core.go +++ /dev/null @@ -1,507 +0,0 @@ -package resourcemerge - -import ( - "reflect" - - corev1 "k8s.io/api/core/v1" - "k8s.io/apimachinery/pkg/api/equality" -) - -// EnsureConfigMap ensures that the existing matches the required. -// modified is set to true when existing had to be updated with required. -func EnsureConfigMap(modified *bool, existing *corev1.ConfigMap, required corev1.ConfigMap) { - EnsureObjectMeta(modified, &existing.ObjectMeta, required.ObjectMeta) - - mergeMap(modified, &existing.Data, required.Data) -} - -// ensurePodTemplateSpec ensures that the existing matches the required. -// modified is set to true when existing had to be updated with required. 
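EnsureConfigMap above also shows the merge-not-replace semantics used throughout this package: keys named in required win, while keys that exist only on the live object are preserved. A small sketch of that behavior:

// Sketch only, illustrating mergeMap via the exported EnsureConfigMap.
modified := pointer.BoolPtr(false)
existing := &corev1.ConfigMap{Data: map[string]string{"a": "1", "operator-added": "kept"}}
required := corev1.ConfigMap{Data: map[string]string{"a": "2"}}
resourcemerge.EnsureConfigMap(modified, existing, required)
// existing.Data is now {"a": "2", "operator-added": "kept"} and *modified is true:
// required keys are added or overwritten, but keys the merge does not own are never deleted.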
-func ensurePodTemplateSpec(modified *bool, existing *corev1.PodTemplateSpec, required corev1.PodTemplateSpec) { - EnsureObjectMeta(modified, &existing.ObjectMeta, required.ObjectMeta) - - ensurePodSpec(modified, &existing.Spec, required.Spec) -} - -func ensurePodSpec(modified *bool, existing *corev1.PodSpec, required corev1.PodSpec) { - ensureContainers(modified, &existing.InitContainers, required.InitContainers) - ensureContainers(modified, &existing.Containers, required.Containers) - - // any volume we specify, we require. - for _, required := range required.Volumes { - var existingCurr *corev1.Volume - for j, curr := range existing.Volumes { - if curr.Name == required.Name { - existingCurr = &existing.Volumes[j] - break - } - } - if existingCurr == nil { - *modified = true - existing.Volumes = append(existing.Volumes, corev1.Volume{}) - existingCurr = &existing.Volumes[len(existing.Volumes)-1] - } - ensureVolume(modified, existingCurr, required) - } - - if len(required.RestartPolicy) > 0 { - if existing.RestartPolicy != required.RestartPolicy { - *modified = true - existing.RestartPolicy = required.RestartPolicy - } - } - - setStringIfSet(modified, &existing.ServiceAccountName, required.ServiceAccountName) - setBool(modified, &existing.HostNetwork, required.HostNetwork) - mergeMap(modified, &existing.NodeSelector, required.NodeSelector) - ensurePodSecurityContextPtr(modified, &existing.SecurityContext, required.SecurityContext) - ensureAffinityPtr(modified, &existing.Affinity, required.Affinity) - ensureTolerations(modified, &existing.Tolerations, required.Tolerations) - setStringIfSet(modified, &existing.PriorityClassName, required.PriorityClassName) - setInt32Ptr(modified, &existing.Priority, required.Priority) -} - -func ensureContainers(modified *bool, existing *[]corev1.Container, required []corev1.Container) { - for i, existingContainer := range *existing { - var existingCurr *corev1.Container - for _, requiredContainer := range required { - if existingContainer.Name == requiredContainer.Name { - existingCurr = &(*existing)[i] - ensureContainer(modified, existingCurr, requiredContainer) - break - } - } - if existingCurr == nil { - *modified = true - *existing = append((*existing)[:i], (*existing)[i+1:]...) 
- } - } - - for _, requiredContainer := range required { - match := false - for _, existingContainer := range *existing { - if existingContainer.Name == requiredContainer.Name { - match = true - break - } - } - if !match { - *modified = true - *existing = append(*existing, requiredContainer) - } - } -} - -func ensureContainer(modified *bool, existing *corev1.Container, required corev1.Container) { - setStringIfSet(modified, &existing.Name, required.Name) - setStringIfSet(modified, &existing.Image, required.Image) - - // if you want modify the launch, you need to modify it in the config, not in the launch args - setStringSlice(modified, &existing.Command, required.Command) - setStringSlice(modified, &existing.Args, required.Args) - ensureEnvVar(modified, &existing.Env, required.Env) - ensureEnvFromSource(modified, &existing.EnvFrom, required.EnvFrom) - setStringIfSet(modified, &existing.WorkingDir, required.WorkingDir) - - // any port we specify, we require - for _, required := range required.Ports { - var existingCurr *corev1.ContainerPort - for j, curr := range existing.Ports { - if curr.Name == required.Name { - existingCurr = &existing.Ports[j] - break - } - } - if existingCurr == nil { - *modified = true - existing.Ports = append(existing.Ports, corev1.ContainerPort{}) - existingCurr = &existing.Ports[len(existing.Ports)-1] - } - ensureContainerPort(modified, existingCurr, required) - } - - // any volume mount we specify, we require - for _, required := range required.VolumeMounts { - var existingCurr *corev1.VolumeMount - for j, curr := range existing.VolumeMounts { - if curr.Name == required.Name { - existingCurr = &existing.VolumeMounts[j] - break - } - } - if existingCurr == nil { - *modified = true - existing.VolumeMounts = append(existing.VolumeMounts, corev1.VolumeMount{}) - existingCurr = &existing.VolumeMounts[len(existing.VolumeMounts)-1] - } - ensureVolumeMount(modified, existingCurr, required) - } - - if required.LivenessProbe != nil { - ensureProbePtr(modified, &existing.LivenessProbe, required.LivenessProbe) - } - if required.ReadinessProbe != nil { - ensureProbePtr(modified, &existing.ReadinessProbe, required.ReadinessProbe) - } - - // our security context should always win - ensureSecurityContextPtr(modified, &existing.SecurityContext, required.SecurityContext) -} - -func ensureEnvVar(modified *bool, existing *[]corev1.EnvVar, required []corev1.EnvVar) { - if required == nil { - return - } - if !equality.Semantic.DeepEqual(required, *existing) { - *existing = required - *modified = true - } -} - -func ensureEnvFromSource(modified *bool, existing *[]corev1.EnvFromSource, required []corev1.EnvFromSource) { - if required == nil { - return - } - if !equality.Semantic.DeepEqual(required, *existing) { - *existing = required - *modified = true - } -} - -func ensureProbePtr(modified *bool, existing **corev1.Probe, required *corev1.Probe) { - // if we have no required, then we don't care what someone else has set - if required == nil { - return - } - if *existing == nil { - *modified = true - *existing = required - return - } - ensureProbe(modified, *existing, *required) -} - -func ensureProbe(modified *bool, existing *corev1.Probe, required corev1.Probe) { - setInt32(modified, &existing.InitialDelaySeconds, required.InitialDelaySeconds) - - ensureProbeHandler(modified, &existing.Handler, required.Handler) -} - -func ensureProbeHandler(modified *bool, existing *corev1.Handler, required corev1.Handler) { - if !equality.Semantic.DeepEqual(required, *existing) { - *modified = true 
- *existing = required - } -} - -func ensureContainerPort(modified *bool, existing *corev1.ContainerPort, required corev1.ContainerPort) { - if !equality.Semantic.DeepEqual(required, *existing) { - *modified = true - *existing = required - } -} - -func ensureVolumeMount(modified *bool, existing *corev1.VolumeMount, required corev1.VolumeMount) { - if !equality.Semantic.DeepEqual(required, *existing) { - *modified = true - *existing = required - } -} - -func ensureVolume(modified *bool, existing *corev1.Volume, required corev1.Volume) { - if !equality.Semantic.DeepEqual(required, *existing) { - *modified = true - *existing = required - } -} - -func ensureSecurityContextPtr(modified *bool, existing **corev1.SecurityContext, required *corev1.SecurityContext) { - // if we have no required, then we don't care what someone else has set - if required == nil { - return - } - - if *existing == nil { - *modified = true - *existing = required - return - } - ensureSecurityContext(modified, *existing, *required) -} - -func ensureSecurityContext(modified *bool, existing *corev1.SecurityContext, required corev1.SecurityContext) { - ensureCapabilitiesPtr(modified, &existing.Capabilities, required.Capabilities) - ensureSELinuxOptionsPtr(modified, &existing.SELinuxOptions, required.SELinuxOptions) - setBoolPtr(modified, &existing.Privileged, required.Privileged) - setInt64Ptr(modified, &existing.RunAsUser, required.RunAsUser) - setBoolPtr(modified, &existing.RunAsNonRoot, required.RunAsNonRoot) - setBoolPtr(modified, &existing.ReadOnlyRootFilesystem, required.ReadOnlyRootFilesystem) - setBoolPtr(modified, &existing.AllowPrivilegeEscalation, required.AllowPrivilegeEscalation) -} - -func ensureCapabilitiesPtr(modified *bool, existing **corev1.Capabilities, required *corev1.Capabilities) { - // if we have no required, then we don't care what someone else has set - if required == nil { - return - } - - if *existing == nil { - *modified = true - *existing = required - return - } - ensureCapabilities(modified, *existing, *required) -} - -func ensureCapabilities(modified *bool, existing *corev1.Capabilities, required corev1.Capabilities) { - // any Add we specify, we require. - for _, required := range required.Add { - found := false - for _, curr := range existing.Add { - if equality.Semantic.DeepEqual(curr, required) { - found = true - break - } - } - if !found { - *modified = true - existing.Add = append(existing.Add, required) - } - } - - // any Drop we specify, we require. 
- for _, required := range required.Drop { - found := false - for _, curr := range existing.Drop { - if equality.Semantic.DeepEqual(curr, required) { - found = true - break - } - } - if !found { - *modified = true - existing.Drop = append(existing.Drop, required) - } - } -} - -func setStringSliceIfSet(modified *bool, existing *[]string, required []string) { - if required == nil { - return - } - if !equality.Semantic.DeepEqual(required, *existing) { - *existing = required - *modified = true - } -} - -func setStringSlice(modified *bool, existing *[]string, required []string) { - if !reflect.DeepEqual(required, *existing) { - *existing = required - *modified = true - } -} - -func mergeStringSlice(modified *bool, existing *[]string, required []string) { - for _, required := range required { - found := false - for _, curr := range *existing { - if required == curr { - found = true - break - } - } - if !found { - *modified = true - *existing = append(*existing, required) - } - } -} - -func ensureTolerations(modified *bool, existing *[]corev1.Toleration, required []corev1.Toleration) { - for ridx := range required { - found := false - for eidx := range *existing { - if required[ridx].Key == (*existing)[eidx].Key { - found = true - if !equality.Semantic.DeepEqual((*existing)[eidx], required[ridx]) { - *modified = true - (*existing)[eidx] = required[ridx] - } - break - } - } - if !found { - *modified = true - *existing = append(*existing, required[ridx]) - } - } -} - -func ensureAffinityPtr(modified *bool, existing **corev1.Affinity, required *corev1.Affinity) { - // if we have no required, then we don't care what someone else has set - if required == nil { - return - } - - if *existing == nil { - *modified = true - *existing = required - return - } - ensureAffinity(modified, *existing, *required) -} - -func ensureAffinity(modified *bool, existing *corev1.Affinity, required corev1.Affinity) { - if !equality.Semantic.DeepEqual(existing.NodeAffinity, required.NodeAffinity) { - *modified = true - (*existing).NodeAffinity = required.NodeAffinity - } - if !equality.Semantic.DeepEqual(existing.PodAffinity, required.PodAffinity) { - *modified = true - (*existing).PodAffinity = required.PodAffinity - } - if !equality.Semantic.DeepEqual(existing.PodAntiAffinity, required.PodAntiAffinity) { - *modified = true - (*existing).PodAntiAffinity = required.PodAntiAffinity - } -} - -func ensurePodSecurityContextPtr(modified *bool, existing **corev1.PodSecurityContext, required *corev1.PodSecurityContext) { - // if we have no required, then we don't care what someone else has set - if required == nil { - return - } - - if *existing == nil { - *modified = true - *existing = required - return - } - ensurePodSecurityContext(modified, *existing, *required) -} - -func ensurePodSecurityContext(modified *bool, existing *corev1.PodSecurityContext, required corev1.PodSecurityContext) { - ensureSELinuxOptionsPtr(modified, &existing.SELinuxOptions, required.SELinuxOptions) - setInt64Ptr(modified, &existing.RunAsUser, required.RunAsUser) - setInt64Ptr(modified, &existing.RunAsGroup, required.RunAsGroup) - setBoolPtr(modified, &existing.RunAsNonRoot, required.RunAsNonRoot) - - // any SupplementalGroups we specify, we require. 
-	for _, required := range required.SupplementalGroups {
-		found := false
-		for _, curr := range existing.SupplementalGroups {
-			if curr == required {
-				found = true
-				break
-			}
-		}
-		if !found {
-			*modified = true
-			existing.SupplementalGroups = append(existing.SupplementalGroups, required)
-		}
-	}
-
-	setInt64Ptr(modified, &existing.FSGroup, required.FSGroup)
-
-	// any Sysctls we specify, we require.
-	for _, required := range required.Sysctls {
-		found := false
-		for j, curr := range existing.Sysctls {
-			if curr.Name == required.Name {
-				found = true
-				if curr.Value != required.Value {
-					*modified = true
-					existing.Sysctls[j] = required
-				}
-				break
-			}
-		}
-		if !found {
-			*modified = true
-			existing.Sysctls = append(existing.Sysctls, required)
-		}
-	}
-}
-
-func ensureSELinuxOptionsPtr(modified *bool, existing **corev1.SELinuxOptions, required *corev1.SELinuxOptions) {
-	// if we have no required, then we don't care what someone else has set
-	if required == nil {
-		return
-	}
-
-	if *existing == nil {
-		*modified = true
-		*existing = required
-		return
-	}
-	ensureSELinuxOptions(modified, *existing, *required)
-}
-
-func ensureSELinuxOptions(modified *bool, existing *corev1.SELinuxOptions, required corev1.SELinuxOptions) {
-	setStringIfSet(modified, &existing.User, required.User)
-	setStringIfSet(modified, &existing.Role, required.Role)
-	setStringIfSet(modified, &existing.Type, required.Type)
-	setStringIfSet(modified, &existing.Level, required.Level)
-}
-
-func setBool(modified *bool, existing *bool, required bool) {
-	if required != *existing {
-		*existing = required
-		*modified = true
-	}
-}
-
-func setBoolPtr(modified *bool, existing **bool, required *bool) {
-	// if we have no required, then we don't care what someone else has set
-	if required == nil {
-		return
-	}
-
-	if *existing == nil {
-		*modified = true
-		*existing = required
-		return
-	}
-	setBool(modified, *existing, *required)
-}
-
-func setInt32(modified *bool, existing *int32, required int32) {
-	if required != *existing {
-		*existing = required
-		*modified = true
-	}
-}
-
-func setInt32Ptr(modified *bool, existing **int32, required *int32) {
-	if *existing == nil && required == nil {
-		return
-	}
-	if *existing == nil || (required == nil && *existing != nil) {
-		*modified = true
-		*existing = required
-		return
-	}
-	setInt32(modified, *existing, *required)
-}
-
-func setInt64(modified *bool, existing *int64, required int64) {
-	if required != *existing {
-		*existing = required
-		*modified = true
-	}
-}
-
-func setInt64Ptr(modified *bool, existing **int64, required *int64) {
-	// if we have no required, then we don't care what someone else has set
-	if required == nil {
-		return
-	}
-
-	if *existing == nil {
-		*modified = true
-		*existing = required
-		return
-	}
-	setInt64(modified, *existing, *required)
-}
diff --git a/vendor/github.com/openshift/cluster-version-operator/lib/resourcemerge/cv.go b/vendor/github.com/openshift/cluster-version-operator/lib/resourcemerge/cv.go
deleted file mode 100644
index 0322532613bbfa87d01360ad152d4c5ee7ce069c..0000000000000000000000000000000000000000
--- a/vendor/github.com/openshift/cluster-version-operator/lib/resourcemerge/cv.go
+++ /dev/null
@@ -1,39 +0,0 @@
-package resourcemerge
-
-import (
-	configv1 "github.com/openshift/api/config/v1"
-	"k8s.io/apimachinery/pkg/api/equality"
-)
-
-func EnsureClusterVersion(modified *bool, existing *configv1.ClusterVersion, required configv1.ClusterVersion) {
-	EnsureObjectMeta(modified, &existing.ObjectMeta, required.ObjectMeta)
-
if !equality.Semantic.DeepEqual(existing.Spec.Upstream, required.Spec.Upstream) { - *modified = true - existing.Spec.Upstream = required.Spec.Upstream - } - if existing.Spec.Channel != required.Spec.Channel { - *modified = true - existing.Spec.Channel = required.Spec.Channel - } - if existing.Spec.ClusterID != required.Spec.ClusterID { - *modified = true - existing.Spec.ClusterID = required.Spec.ClusterID - } - - if !equality.Semantic.DeepEqual(existing.Spec.DesiredUpdate, required.Spec.DesiredUpdate) { - *modified = true - if required.Spec.DesiredUpdate != nil { - copied := *required.Spec.DesiredUpdate - existing.Spec.DesiredUpdate = &copied - } else { - existing.Spec.DesiredUpdate = nil - } - } -} - -func EnsureClusterVersionStatus(modified *bool, existing *configv1.ClusterVersion, required configv1.ClusterVersion) { - if !equality.Semantic.DeepEqual(existing.Status, required.Status) { - *modified = true - existing.Status = *required.Status.DeepCopy() - } -} diff --git a/vendor/github.com/openshift/cluster-version-operator/lib/resourcemerge/meta.go b/vendor/github.com/openshift/cluster-version-operator/lib/resourcemerge/meta.go deleted file mode 100644 index 5b1771b916cc844b15170666d4d72498b4399a8a..0000000000000000000000000000000000000000 --- a/vendor/github.com/openshift/cluster-version-operator/lib/resourcemerge/meta.go +++ /dev/null @@ -1,61 +0,0 @@ -package resourcemerge - -import ( - "k8s.io/apimachinery/pkg/api/equality" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" -) - -// EnsureObjectMeta ensures that the existing matches the required. -// modified is set to true when existing had to be updated with required. -func EnsureObjectMeta(modified *bool, existing *metav1.ObjectMeta, required metav1.ObjectMeta) { - setStringIfSet(modified, &existing.Namespace, required.Namespace) - setStringIfSet(modified, &existing.Name, required.Name) - mergeMap(modified, &existing.Labels, required.Labels) - mergeMap(modified, &existing.Annotations, required.Annotations) - mergeOwnerRefs(modified, &existing.OwnerReferences, required.OwnerReferences) -} - -func setStringIfSet(modified *bool, existing *string, required string) { - if len(required) == 0 { - return - } - if required != *existing { - *existing = required - *modified = true - } -} - -func mergeMap(modified *bool, existing *map[string]string, required map[string]string) { - if *existing == nil { - if required == nil { - return - } - *existing = map[string]string{} - } - for k, v := range required { - if existingV, ok := (*existing)[k]; !ok || v != existingV { - *modified = true - (*existing)[k] = v - } - } -} - -func mergeOwnerRefs(modified *bool, existing *[]metav1.OwnerReference, required []metav1.OwnerReference) { - for ridx := range required { - found := false - for eidx := range *existing { - if required[ridx].UID == (*existing)[eidx].UID { - found = true - if !equality.Semantic.DeepEqual((*existing)[eidx], required[ridx]) { - *modified = true - (*existing)[eidx] = required[ridx] - } - break - } - } - if !found { - *modified = true - *existing = append(*existing, required[ridx]) - } - } -} diff --git a/vendor/github.com/openshift/cluster-version-operator/lib/resourcemerge/os.go b/vendor/github.com/openshift/cluster-version-operator/lib/resourcemerge/os.go deleted file mode 100644 index d2b489c6b25306115a193d956380af8d22c4df2f..0000000000000000000000000000000000000000 --- a/vendor/github.com/openshift/cluster-version-operator/lib/resourcemerge/os.go +++ /dev/null @@ -1,114 +0,0 @@ -package resourcemerge - -import ( - "time" - - 
"k8s.io/apimachinery/pkg/api/equality" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - - configv1 "github.com/openshift/api/config/v1" -) - -func EnsureClusterOperatorStatus(modified *bool, existing *configv1.ClusterOperator, required configv1.ClusterOperator) { - EnsureObjectMeta(modified, &existing.ObjectMeta, required.ObjectMeta) - ensureClusterOperatorStatus(modified, &existing.Status, required.Status) -} - -func ensureClusterOperatorStatus(modified *bool, existing *configv1.ClusterOperatorStatus, required configv1.ClusterOperatorStatus) { - if !equality.Semantic.DeepEqual(existing.Conditions, required.Conditions) { - *modified = true - existing.Conditions = required.Conditions - } - - if !equality.Semantic.DeepEqual(existing.Versions, required.Versions) { - *modified = true - existing.Versions = required.Versions - } - if !equality.Semantic.DeepEqual(existing.Extension.Raw, required.Extension.Raw) { - *modified = true - existing.Extension.Raw = required.Extension.Raw - } - if !equality.Semantic.DeepEqual(existing.Extension.Object, required.Extension.Object) { - *modified = true - existing.Extension.Object = required.Extension.Object - } - if !equality.Semantic.DeepEqual(existing.RelatedObjects, required.RelatedObjects) { - *modified = true - existing.RelatedObjects = required.RelatedObjects - } -} - -func SetOperatorStatusCondition(conditions *[]configv1.ClusterOperatorStatusCondition, newCondition configv1.ClusterOperatorStatusCondition) { - if conditions == nil { - conditions = &[]configv1.ClusterOperatorStatusCondition{} - } - existingCondition := FindOperatorStatusCondition(*conditions, newCondition.Type) - if existingCondition == nil { - newCondition.LastTransitionTime = metav1.NewTime(time.Now()) - *conditions = append(*conditions, newCondition) - return - } - - if existingCondition.Status != newCondition.Status { - existingCondition.Status = newCondition.Status - existingCondition.LastTransitionTime = metav1.NewTime(time.Now()) - } - - existingCondition.Reason = newCondition.Reason - existingCondition.Message = newCondition.Message -} - -func RemoveOperatorStatusCondition(conditions *[]configv1.ClusterOperatorStatusCondition, conditionType configv1.ClusterStatusConditionType) { - if conditions == nil { - conditions = &[]configv1.ClusterOperatorStatusCondition{} - } - newConditions := []configv1.ClusterOperatorStatusCondition{} - for _, condition := range *conditions { - if condition.Type != conditionType { - newConditions = append(newConditions, condition) - } - } - - *conditions = newConditions -} - -func FindOperatorStatusCondition(conditions []configv1.ClusterOperatorStatusCondition, conditionType configv1.ClusterStatusConditionType) *configv1.ClusterOperatorStatusCondition { - for i := range conditions { - if conditions[i].Type == conditionType { - return &conditions[i] - } - } - - return nil -} - -func IsOperatorStatusConditionTrue(conditions []configv1.ClusterOperatorStatusCondition, conditionType configv1.ClusterStatusConditionType) bool { - return IsOperatorStatusConditionPresentAndEqual(conditions, conditionType, configv1.ConditionTrue) -} - -func IsOperatorStatusConditionFalse(conditions []configv1.ClusterOperatorStatusCondition, conditionType configv1.ClusterStatusConditionType) bool { - return IsOperatorStatusConditionPresentAndEqual(conditions, conditionType, configv1.ConditionFalse) -} - -func IsOperatorStatusConditionPresentAndEqual(conditions []configv1.ClusterOperatorStatusCondition, conditionType configv1.ClusterStatusConditionType, status 
configv1.ConditionStatus) bool { - for _, condition := range conditions { - if condition.Type == conditionType { - return condition.Status == status - } - } - return false -} - -func IsOperatorStatusConditionNotIn(conditions []configv1.ClusterOperatorStatusCondition, conditionType configv1.ClusterStatusConditionType, status ...configv1.ConditionStatus) bool { - for _, condition := range conditions { - if condition.Type == conditionType { - for _, s := range status { - if s == condition.Status { - return false - } - } - return true - } - } - return true -} diff --git a/vendor/github.com/openshift/cluster-version-operator/lib/resourcemerge/rbac.go b/vendor/github.com/openshift/cluster-version-operator/lib/resourcemerge/rbac.go deleted file mode 100644 index e39e843addb10ee102be26fc5616f3aa7c7a37a3..0000000000000000000000000000000000000000 --- a/vendor/github.com/openshift/cluster-version-operator/lib/resourcemerge/rbac.go +++ /dev/null @@ -1,54 +0,0 @@ -package resourcemerge - -import ( - rbacv1 "k8s.io/api/rbac/v1" - "k8s.io/apimachinery/pkg/api/equality" -) - -// EnsureClusterRoleBinding ensures that the existing matches the required. -// modified is set to true when existing had to be updated with required. -func EnsureClusterRoleBinding(modified *bool, existing *rbacv1.ClusterRoleBinding, required rbacv1.ClusterRoleBinding) { - EnsureObjectMeta(modified, &existing.ObjectMeta, required.ObjectMeta) - if !equality.Semantic.DeepEqual(existing.Subjects, required.Subjects) { - *modified = true - existing.Subjects = required.Subjects - } - if !equality.Semantic.DeepEqual(existing.RoleRef, required.RoleRef) { - *modified = true - existing.RoleRef = required.RoleRef - } -} - -// EnsureClusterRole ensures that the existing matches the required. -// modified is set to true when existing had to be updated with required. -func EnsureClusterRole(modified *bool, existing *rbacv1.ClusterRole, required rbacv1.ClusterRole) { - EnsureObjectMeta(modified, &existing.ObjectMeta, required.ObjectMeta) - if !equality.Semantic.DeepEqual(existing.Rules, required.Rules) { - *modified = true - existing.Rules = required.Rules - } -} - -// EnsureRoleBinding ensures that the existing matches the required. -// modified is set to true when existing had to be updated with required. -func EnsureRoleBinding(modified *bool, existing *rbacv1.RoleBinding, required rbacv1.RoleBinding) { - EnsureObjectMeta(modified, &existing.ObjectMeta, required.ObjectMeta) - if !equality.Semantic.DeepEqual(existing.Subjects, required.Subjects) { - *modified = true - existing.Subjects = required.Subjects - } - if !equality.Semantic.DeepEqual(existing.RoleRef, required.RoleRef) { - *modified = true - existing.RoleRef = required.RoleRef - } -} - -// EnsureRole ensures that the existing matches the required. -// modified is set to true when existing had to be updated with required. 
-func EnsureRole(modified *bool, existing *rbacv1.Role, required rbacv1.Role) { - EnsureObjectMeta(modified, &existing.ObjectMeta, required.ObjectMeta) - if !equality.Semantic.DeepEqual(existing.Rules, required.Rules) { - *modified = true - existing.Rules = required.Rules - } -} diff --git a/vendor/github.com/openshift/cluster-version-operator/lib/resourcemerge/security.go b/vendor/github.com/openshift/cluster-version-operator/lib/resourcemerge/security.go deleted file mode 100644 index 9ad63dddfcce4347e32cd186f47182c6d02ca4bc..0000000000000000000000000000000000000000 --- a/vendor/github.com/openshift/cluster-version-operator/lib/resourcemerge/security.go +++ /dev/null @@ -1,108 +0,0 @@ -package resourcemerge - -import ( - securityv1 "github.com/openshift/api/security/v1" - "k8s.io/apimachinery/pkg/api/equality" -) - -// EnsureSecurityContextConstraints ensures that the existing matches the required. -// modified is set to true when existing had to be updated with required. -func EnsureSecurityContextConstraints(modified *bool, existing *securityv1.SecurityContextConstraints, required securityv1.SecurityContextConstraints) { - EnsureObjectMeta(modified, &existing.ObjectMeta, required.ObjectMeta) - setInt32Ptr(modified, &existing.Priority, required.Priority) - setBool(modified, &existing.AllowPrivilegedContainer, required.AllowPrivilegedContainer) - for _, required := range required.DefaultAddCapabilities { - found := false - for _, curr := range existing.DefaultAddCapabilities { - if equality.Semantic.DeepEqual(required, curr) { - found = true - break - } - } - if !found { - *modified = true - existing.DefaultAddCapabilities = append(existing.DefaultAddCapabilities, required) - } - } - for _, required := range required.RequiredDropCapabilities { - found := false - for _, curr := range existing.RequiredDropCapabilities { - if equality.Semantic.DeepEqual(required, curr) { - found = true - break - } - } - if !found { - *modified = true - existing.RequiredDropCapabilities = append(existing.RequiredDropCapabilities, required) - } - } - for _, required := range required.AllowedCapabilities { - found := false - for _, curr := range existing.AllowedCapabilities { - if equality.Semantic.DeepEqual(required, curr) { - found = true - break - } - } - if !found { - *modified = true - existing.AllowedCapabilities = append(existing.AllowedCapabilities, required) - } - } - setBool(modified, &existing.AllowHostDirVolumePlugin, required.AllowHostDirVolumePlugin) - for _, required := range required.Volumes { - found := false - for _, curr := range existing.Volumes { - if equality.Semantic.DeepEqual(required, curr) { - found = true - break - } - } - if !found { - *modified = true - existing.Volumes = append(existing.Volumes, required) - } - } - for _, required := range required.AllowedFlexVolumes { - found := false - for _, curr := range existing.AllowedFlexVolumes { - if equality.Semantic.DeepEqual(required.Driver, curr.Driver) { - found = true - break - } - } - if !found { - *modified = true - existing.AllowedFlexVolumes = append(existing.AllowedFlexVolumes, required) - } - } - setBool(modified, &existing.AllowHostNetwork, required.AllowHostNetwork) - setBool(modified, &existing.AllowHostPorts, required.AllowHostPorts) - setBool(modified, &existing.AllowHostPID, required.AllowHostPID) - setBool(modified, &existing.AllowHostIPC, required.AllowHostIPC) - setBoolPtr(modified, &existing.DefaultAllowPrivilegeEscalation, required.DefaultAllowPrivilegeEscalation) - setBoolPtr(modified, 
&existing.AllowPrivilegeEscalation, required.AllowPrivilegeEscalation) - if !equality.Semantic.DeepEqual(existing.SELinuxContext, required.SELinuxContext) { - *modified = true - existing.SELinuxContext = required.SELinuxContext - } - if !equality.Semantic.DeepEqual(existing.RunAsUser, required.RunAsUser) { - *modified = true - existing.RunAsUser = required.RunAsUser - } - if !equality.Semantic.DeepEqual(existing.FSGroup, required.FSGroup) { - *modified = true - existing.FSGroup = required.FSGroup - } - if !equality.Semantic.DeepEqual(existing.SupplementalGroups, required.SupplementalGroups) { - *modified = true - existing.SupplementalGroups = required.SupplementalGroups - } - setBool(modified, &existing.ReadOnlyRootFilesystem, required.ReadOnlyRootFilesystem) - mergeStringSlice(modified, &existing.Users, required.Users) - mergeStringSlice(modified, &existing.Groups, required.Groups) - mergeStringSlice(modified, &existing.SeccompProfiles, required.SeccompProfiles) - mergeStringSlice(modified, &existing.AllowedUnsafeSysctls, required.AllowedUnsafeSysctls) - mergeStringSlice(modified, &existing.ForbiddenSysctls, required.ForbiddenSysctls) -} diff --git a/vendor/github.com/openshift/cluster-version-operator/LICENSE b/vendor/github.com/openshift/library-go/LICENSE similarity index 99% rename from vendor/github.com/openshift/cluster-version-operator/LICENSE rename to vendor/github.com/openshift/library-go/LICENSE index d645695673349e3947e8e5ae42332d0ac3164cd7..261eeb9e9f8b2b4b0d119366dda99c6fd7d35c64 100644 --- a/vendor/github.com/openshift/cluster-version-operator/LICENSE +++ b/vendor/github.com/openshift/library-go/LICENSE @@ -1,4 +1,3 @@ - Apache License Version 2.0, January 2004 http://www.apache.org/licenses/ diff --git a/vendor/github.com/openshift/library-go/pkg/config/clusteroperator/v1helpers/status.go b/vendor/github.com/openshift/library-go/pkg/config/clusteroperator/v1helpers/status.go new file mode 100644 index 0000000000000000000000000000000000000000..c2ddfd99566e7b90a9ff2f18e62805dcb3e86945 --- /dev/null +++ b/vendor/github.com/openshift/library-go/pkg/config/clusteroperator/v1helpers/status.go @@ -0,0 +1,140 @@ +package v1helpers + +import ( + "bytes" + "fmt" + "strings" + "time" + + "k8s.io/apimachinery/pkg/api/equality" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/util/diff" + "k8s.io/apimachinery/pkg/util/json" + + configv1 "github.com/openshift/api/config/v1" +) + +// SetStatusCondition sets the corresponding condition in conditions to newCondition. +func SetStatusCondition(conditions *[]configv1.ClusterOperatorStatusCondition, newCondition configv1.ClusterOperatorStatusCondition) { + if conditions == nil { + conditions = &[]configv1.ClusterOperatorStatusCondition{} + } + existingCondition := FindStatusCondition(*conditions, newCondition.Type) + if existingCondition == nil { + newCondition.LastTransitionTime = metav1.NewTime(time.Now()) + *conditions = append(*conditions, newCondition) + return + } + + if existingCondition.Status != newCondition.Status { + existingCondition.Status = newCondition.Status + existingCondition.LastTransitionTime = metav1.NewTime(time.Now()) + } + + existingCondition.Reason = newCondition.Reason + existingCondition.Message = newCondition.Message +} + +// RemoveStatusCondition removes the corresponding conditionType from conditions. 
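SetStatusCondition deliberately leaves LastTransitionTime alone unless Status actually flips, so transition timestamps stay meaningful across repeated syncs. A sketch of a replacement call site, where co is a hypothetical *configv1.ClusterOperator:

// Sketch only; co is a hypothetical *configv1.ClusterOperator being reconciled.
v1helpers.SetStatusCondition(&co.Status.Conditions, configv1.ClusterOperatorStatusCondition{
	Type:    configv1.OperatorAvailable,
	Status:  configv1.ConditionTrue,
	Reason:  "AsExpected",             // hypothetical reason
	Message: "all replicas are ready", // hypothetical message
})
// Reason and Message are always refreshed; LastTransitionTime is bumped
// only when Status genuinely changes value.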
+func RemoveStatusCondition(conditions *[]configv1.ClusterOperatorStatusCondition, conditionType configv1.ClusterStatusConditionType) { + if conditions == nil { + conditions = &[]configv1.ClusterOperatorStatusCondition{} + } + newConditions := []configv1.ClusterOperatorStatusCondition{} + for _, condition := range *conditions { + if condition.Type != conditionType { + newConditions = append(newConditions, condition) + } + } + + *conditions = newConditions +} + +// FindStatusCondition finds the conditionType in conditions. +func FindStatusCondition(conditions []configv1.ClusterOperatorStatusCondition, conditionType configv1.ClusterStatusConditionType) *configv1.ClusterOperatorStatusCondition { + for i := range conditions { + if conditions[i].Type == conditionType { + return &conditions[i] + } + } + + return nil +} + +// GetStatusDiff returns a string representing change in condition status in human readable form. +func GetStatusDiff(oldStatus configv1.ClusterOperatorStatus, newStatus configv1.ClusterOperatorStatus) string { + messages := []string{} + for _, newCondition := range newStatus.Conditions { + existingStatusCondition := FindStatusCondition(oldStatus.Conditions, newCondition.Type) + if existingStatusCondition == nil { + messages = append(messages, fmt.Sprintf("%s set to %s (%q)", newCondition.Type, newCondition.Status, newCondition.Message)) + continue + } + if existingStatusCondition.Status != newCondition.Status { + messages = append(messages, fmt.Sprintf("%s changed from %s to %s (%q)", existingStatusCondition.Type, existingStatusCondition.Status, newCondition.Status, newCondition.Message)) + continue + } + if existingStatusCondition.Message != newCondition.Message { + messages = append(messages, fmt.Sprintf("%s message changed from %q to %q", existingStatusCondition.Type, existingStatusCondition.Message, newCondition.Message)) + } + } + for _, oldCondition := range oldStatus.Conditions { + // This should not happen. 
It means we removed old condition entirely instead of just changing its status + if c := FindStatusCondition(newStatus.Conditions, oldCondition.Type); c == nil { + messages = append(messages, fmt.Sprintf("%s was removed", oldCondition.Type)) + } + } + + if !equality.Semantic.DeepEqual(oldStatus.RelatedObjects, newStatus.RelatedObjects) { + messages = append(messages, fmt.Sprintf("status.relatedObjects changed from %q to %q", oldStatus.RelatedObjects, newStatus.RelatedObjects)) + } + if !equality.Semantic.DeepEqual(oldStatus.Extension, newStatus.Extension) { + messages = append(messages, fmt.Sprintf("status.extension changed from %q to %q", oldStatus.Extension, newStatus.Extension)) + } + + if len(messages) == 0 { + // ignore errors + originalJSON := &bytes.Buffer{} + json.NewEncoder(originalJSON).Encode(oldStatus) + newJSON := &bytes.Buffer{} + json.NewEncoder(newJSON).Encode(newStatus) + messages = append(messages, diff.StringDiff(originalJSON.String(), newJSON.String())) + } + + return strings.Join(messages, ",") +} + +// IsStatusConditionTrue returns true when the conditionType is present and set to `configv1.ConditionTrue` +func IsStatusConditionTrue(conditions []configv1.ClusterOperatorStatusCondition, conditionType configv1.ClusterStatusConditionType) bool { + return IsStatusConditionPresentAndEqual(conditions, conditionType, configv1.ConditionTrue) +} + +// IsStatusConditionFalse returns true when the conditionType is present and set to `configv1.ConditionFalse` +func IsStatusConditionFalse(conditions []configv1.ClusterOperatorStatusCondition, conditionType configv1.ClusterStatusConditionType) bool { + return IsStatusConditionPresentAndEqual(conditions, conditionType, configv1.ConditionFalse) +} + +// IsStatusConditionPresentAndEqual returns true when conditionType is present and equal to status. +func IsStatusConditionPresentAndEqual(conditions []configv1.ClusterOperatorStatusCondition, conditionType configv1.ClusterStatusConditionType, status configv1.ConditionStatus) bool { + for _, condition := range conditions { + if condition.Type == conditionType { + return condition.Status == status + } + } + return false +} + +// IsStatusConditionNotIn returns true when the conditionType does not match the status. 
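GetStatusDiff is meant for human-readable logging of status transitions, and the IsStatusCondition* predicates replace the deleted resourcemerge helpers one for one. A short sketch; oldStatus and co are hypothetical:

// Sketch only; oldStatus and co are hypothetical.
if msg := v1helpers.GetStatusDiff(oldStatus, co.Status); len(msg) > 0 {
	klog.Infof("clusteroperator %q status changed: %s", co.Name, msg)
}
if v1helpers.IsStatusConditionTrue(co.Status.Conditions, configv1.OperatorDegraded) {
	// react to a degraded operator here
}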
+func IsStatusConditionNotIn(conditions []configv1.ClusterOperatorStatusCondition, conditionType configv1.ClusterStatusConditionType, status ...configv1.ConditionStatus) bool {
+	for _, condition := range conditions {
+		if condition.Type == conditionType {
+			for _, s := range status {
+				if s == condition.Status {
+					return false
+				}
+			}
+			return true
+		}
+	}
+	return true
+}
diff --git a/vendor/github.com/openshift/library-go/pkg/operator/events/OWNERS b/vendor/github.com/openshift/library-go/pkg/operator/events/OWNERS
new file mode 100644
index 0000000000000000000000000000000000000000..4f189b70875ff1ff4fe68fb46b37711cbbb7bd69
--- /dev/null
+++ b/vendor/github.com/openshift/library-go/pkg/operator/events/OWNERS
@@ -0,0 +1,8 @@
+reviewers:
+  - mfojtik
+  - deads2k
+  - sttts
+approvers:
+  - mfojtik
+  - deads2k
+  - sttts
diff --git a/vendor/github.com/openshift/library-go/pkg/operator/events/recorder.go b/vendor/github.com/openshift/library-go/pkg/operator/events/recorder.go
new file mode 100644
index 0000000000000000000000000000000000000000..0a910a13b96f5cae032ceacab502163be2a3ea51
--- /dev/null
+++ b/vendor/github.com/openshift/library-go/pkg/operator/events/recorder.go
@@ -0,0 +1,214 @@
+package events
+
+import (
+	"errors"
+	"fmt"
+	"os"
+	"time"
+
+	"k8s.io/client-go/kubernetes"
+	"k8s.io/klog"
+
+	corev1 "k8s.io/api/core/v1"
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	corev1client "k8s.io/client-go/kubernetes/typed/core/v1"
+)
+
+// Recorder is a simple event recording interface.
+type Recorder interface {
+	Event(reason, message string)
+	Eventf(reason, messageFmt string, args ...interface{})
+	Warning(reason, message string)
+	Warningf(reason, messageFmt string, args ...interface{})
+
+	// ForComponent allows changing the component name before the event is sent to the sink.
+	// Making component names more unique prevents the upstream event sink's spam filter
+	// from dropping events.
+	ForComponent(componentName string) Recorder
+
+	// WithComponentSuffix is similar to ForComponent except it suffixes the current component name instead of overriding it.
+	WithComponentSuffix(componentNameSuffix string) Recorder
+
+	// ComponentName returns the current source component name for the event.
+	// This allows suffixing the original component name with a 'sub-component'.
+	ComponentName() string
+}
+
+// podNameEnv is the name of the environment variable, set inside the container, that holds the name of the current pod.
+// That pod name is then used to resolve the source/involved object for operator events.
+const podNameEnv = "POD_NAME"
+
+// podNameEnvFunc allows overriding the way we get the environment variable value (for unit tests).
+var podNameEnvFunc = func() string {
+	return os.Getenv(podNameEnv)
+}
+
+// GetControllerReferenceForCurrentPod provides an object reference to a controller managing the pod/container where this process runs.
+// The pod name must be provided via the POD_NAME environment variable.
+// Even if this method returns an error, it always returns a valid reference to the namespace. This lets callers control the logging
+// and decide whether to fail or to accept the namespace-level reference.
+// GetControllerReferenceForCurrentPod provides an object reference to the controller managing the pod/container where this process runs.
+// The pod name must be provided via the POD_NAME environment variable.
+// Even if this method returns an error, it always returns a valid reference to the namespace. This allows the callers to control the logging
+// and decide whether to fail or to accept the namespace.
+func GetControllerReferenceForCurrentPod(client kubernetes.Interface, targetNamespace string, reference *corev1.ObjectReference) (*corev1.ObjectReference, error) {
+	if reference == nil {
+		// Try to get the pod name via the POD_NAME environment variable
+		reference := &corev1.ObjectReference{Kind: "Pod", Name: podNameEnvFunc(), Namespace: targetNamespace}
+		if len(reference.Name) != 0 {
+			return GetControllerReferenceForCurrentPod(client, targetNamespace, reference)
+		}
+		// If that fails, let's try to guess the pod by listing all pods in the namespace and using the first pod that has a controller
+		reference, err := guessControllerReferenceForNamespace(client.CoreV1().Pods(targetNamespace))
+		if err != nil {
+			// If this also fails, do not give up with an error; instead use the namespace as the controller reference for the pod
+			// NOTE: This is a last resort; if we see this often it might indicate something is wrong in the cluster.
+			//       In some cases this might help with flakes.
+			return getControllerReferenceForNamespace(targetNamespace), err
+		}
+		return GetControllerReferenceForCurrentPod(client, targetNamespace, reference)
+	}
+
+	switch reference.Kind {
+	case "Pod":
+		pod, err := client.CoreV1().Pods(reference.Namespace).Get(reference.Name, metav1.GetOptions{})
+		if err != nil {
+			return getControllerReferenceForNamespace(reference.Namespace), err
+		}
+		if podController := metav1.GetControllerOf(pod); podController != nil {
+			return GetControllerReferenceForCurrentPod(client, targetNamespace, makeObjectReference(podController, targetNamespace))
+		}
+		// This is a bare pod without any ownerReference
+		return makeObjectReference(&metav1.OwnerReference{Kind: "Pod", Name: pod.Name, UID: pod.UID, APIVersion: "v1"}, pod.Namespace), nil
+	case "ReplicaSet":
+		rs, err := client.AppsV1().ReplicaSets(reference.Namespace).Get(reference.Name, metav1.GetOptions{})
+		if err != nil {
+			return getControllerReferenceForNamespace(reference.Namespace), err
+		}
+		if rsController := metav1.GetControllerOf(rs); rsController != nil {
+			return GetControllerReferenceForCurrentPod(client, targetNamespace, makeObjectReference(rsController, targetNamespace))
+		}
+		// This is a replicaSet without any ownerReference
+		return reference, nil
+	default:
+		return reference, nil
+	}
+}
+
+// getControllerReferenceForNamespace returns an object reference to the given namespace.
+func getControllerReferenceForNamespace(targetNamespace string) *corev1.ObjectReference {
+	return &corev1.ObjectReference{
+		Kind:       "Namespace",
+		Namespace:  targetNamespace,
+		Name:       targetNamespace,
+		APIVersion: "v1",
+	}
+}
+
+// makeObjectReference makes an object reference from an ownerReference and a target namespace
+func makeObjectReference(owner *metav1.OwnerReference, targetNamespace string) *corev1.ObjectReference {
+	return &corev1.ObjectReference{
+		Kind:       owner.Kind,
+		Namespace:  targetNamespace,
+		Name:       owner.Name,
+		UID:        owner.UID,
+		APIVersion: owner.APIVersion,
+	}
+}
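Typical wiring combines this lookup with the `NewRecorder` constructor defined just below. A hedged sketch, assuming POD_NAME is set on the container; the "my-operator" component name is illustrative:

```go
package operatorsetup

import (
	"k8s.io/client-go/kubernetes"
	"k8s.io/klog"

	"github.com/openshift/library-go/pkg/operator/events"
)

// newOperatorRecorder resolves the controller of the current pod and
// attributes operator events to it.
func newOperatorRecorder(kubeClient kubernetes.Interface, namespace string) events.Recorder {
	ref, err := events.GetControllerReferenceForCurrentPod(kubeClient, namespace, nil)
	if err != nil {
		// Per the contract above, ref still points at the namespace, so we can
		// proceed and merely log the degraded attribution.
		klog.Warningf("unable to resolve controller reference: %v", err)
	}
	return events.NewRecorder(kubeClient.CoreV1().Events(namespace), "my-operator", ref)
}
```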
+// guessControllerReferenceForNamespace tries to guess what resource to reference by inspecting the pods in the namespace.
+func guessControllerReferenceForNamespace(client corev1client.PodInterface) (*corev1.ObjectReference, error) {
+	pods, err := client.List(metav1.ListOptions{})
+	if err != nil {
+		return nil, err
+	}
+	if len(pods.Items) == 0 {
+		return nil, fmt.Errorf("unable to setup event recorder as %q env variable is not set and there are no pods", podNameEnv)
+	}
+
+	for _, pod := range pods.Items {
+		ownerRef := metav1.GetControllerOf(&pod)
+		if ownerRef == nil {
+			continue
+		}
+		return &corev1.ObjectReference{
+			Kind:       ownerRef.Kind,
+			Namespace:  pod.Namespace,
+			Name:       ownerRef.Name,
+			UID:        ownerRef.UID,
+			APIVersion: ownerRef.APIVersion,
+		}, nil
+	}
+	return nil, errors.New("can't guess controller ref")
+}
+
+// NewRecorder returns a new event recorder.
+func NewRecorder(client corev1client.EventInterface, sourceComponentName string, involvedObjectRef *corev1.ObjectReference) Recorder {
+	return &recorder{
+		eventClient:       client,
+		involvedObjectRef: involvedObjectRef,
+		sourceComponent:   sourceComponentName,
+	}
+}
+
+// recorder is an implementation of the Recorder interface.
+type recorder struct {
+	eventClient       corev1client.EventInterface
+	involvedObjectRef *corev1.ObjectReference
+	sourceComponent   string
+}
+
+func (r *recorder) ComponentName() string {
+	return r.sourceComponent
+}
+
+func (r *recorder) ForComponent(componentName string) Recorder {
+	newRecorderForComponent := *r
+	newRecorderForComponent.sourceComponent = componentName
+	return &newRecorderForComponent
+}
+
+func (r *recorder) WithComponentSuffix(suffix string) Recorder {
+	return r.ForComponent(fmt.Sprintf("%s-%s", r.ComponentName(), suffix))
+}
+
+// Eventf emits a normal-type event and allows formatting of the message.
+func (r *recorder) Eventf(reason, messageFmt string, args ...interface{}) {
+	r.Event(reason, fmt.Sprintf(messageFmt, args...))
+}
+
+// Warningf emits a warning-type event and allows formatting of the message.
+func (r *recorder) Warningf(reason, messageFmt string, args ...interface{}) {
+	r.Warning(reason, fmt.Sprintf(messageFmt, args...))
+}
+
+// Event emits a normal-type event.
+func (r *recorder) Event(reason, message string) {
+	event := makeEvent(r.involvedObjectRef, r.sourceComponent, corev1.EventTypeNormal, reason, message)
+	if _, err := r.eventClient.Create(event); err != nil {
+		klog.Warningf("Error creating event %+v: %v", event, err)
+	}
+}
+// Warning emits a warning-type event.
+func (r *recorder) Warning(reason, message string) {
+	event := makeEvent(r.involvedObjectRef, r.sourceComponent, corev1.EventTypeWarning, reason, message)
+	if _, err := r.eventClient.Create(event); err != nil {
+		klog.Warningf("Error creating event %+v: %v", event, err)
+	}
+}
+
+func makeEvent(involvedObjRef *corev1.ObjectReference, sourceComponent string, eventType, reason, message string) *corev1.Event {
+	currentTime := metav1.Time{Time: time.Now()}
+	event := &corev1.Event{
+		ObjectMeta: metav1.ObjectMeta{
+			Name:      fmt.Sprintf("%v.%x", involvedObjRef.Name, currentTime.UnixNano()),
+			Namespace: involvedObjRef.Namespace,
+		},
+		InvolvedObject: *involvedObjRef,
+		Reason:         reason,
+		Message:        message,
+		Type:           eventType,
+		Count:          1,
+		FirstTimestamp: currentTime,
+		LastTimestamp:  currentTime,
+	}
+	event.Source.Component = sourceComponent
+	return event
+}
diff --git a/vendor/github.com/openshift/library-go/pkg/operator/events/recorder_in_memory.go b/vendor/github.com/openshift/library-go/pkg/operator/events/recorder_in_memory.go
new file mode 100644
index 0000000000000000000000000000000000000000..b64d9f6a9879ee009c807acaf0153063415af476
--- /dev/null
+++ b/vendor/github.com/openshift/library-go/pkg/operator/events/recorder_in_memory.go
@@ -0,0 +1,77 @@
+package events
+
+import (
+	"fmt"
+	"sync"
+
+	corev1 "k8s.io/api/core/v1"
+	"k8s.io/klog"
+)
+
+type inMemoryEventRecorder struct {
+	events []*corev1.Event
+	source string
+	sync.Mutex
+}
+
+// inMemoryDummyObjectReference is used for fake events.
+var inMemoryDummyObjectReference = corev1.ObjectReference{
+	Kind:       "Pod",
+	Namespace:  "dummy",
+	Name:       "dummy",
+	APIVersion: "v1",
+}
+
+type InMemoryRecorder interface {
+	Events() []*corev1.Event
+	Recorder
+}
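The in-memory recorder exists to make event emission assertable in tests. A sketch of the intended use, built on the constructor that follows (the test name and event reason are illustrative stand-ins for real code under test):

```go
package operator_test

import (
	"testing"

	"github.com/openshift/library-go/pkg/operator/events"
)

func TestSyncEmitsEvent(t *testing.T) {
	recorder := events.NewInMemoryRecorder("test")

	// Stand-in for the code under test, which would receive the recorder.
	recorder.Eventf("ConfigMapUpdated", "updated %s", "openshift-config/foo")

	captured := recorder.Events()
	if len(captured) != 1 || captured[0].Reason != "ConfigMapUpdated" {
		t.Fatalf("unexpected events: %#v", captured)
	}
}
```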
+// NewInMemoryRecorder provides an event recorder that stores all recorded events in memory and allows replaying them using the Events() method.
+// This recorder should only be used in unit tests.
+func NewInMemoryRecorder(sourceComponent string) InMemoryRecorder {
+	return &inMemoryEventRecorder{events: []*corev1.Event{}, source: sourceComponent}
+}
+
+func (r *inMemoryEventRecorder) ComponentName() string {
+	return r.source
+}
+
+func (r *inMemoryEventRecorder) ForComponent(component string) Recorder {
+	r.Lock()
+	defer r.Unlock()
+	r.source = component
+	return r
+}
+
+func (r *inMemoryEventRecorder) WithComponentSuffix(suffix string) Recorder {
+	return r.ForComponent(fmt.Sprintf("%s-%s", r.ComponentName(), suffix))
+}
+
+// Events returns the list of recorded events
+func (r *inMemoryEventRecorder) Events() []*corev1.Event {
+	return r.events
+}
+
+func (r *inMemoryEventRecorder) Event(reason, message string) {
+	r.Lock()
+	defer r.Unlock()
+	event := makeEvent(&inMemoryDummyObjectReference, r.source, corev1.EventTypeNormal, reason, message)
+	r.events = append(r.events, event)
+}
+
+func (r *inMemoryEventRecorder) Eventf(reason, messageFmt string, args ...interface{}) {
+	r.Event(reason, fmt.Sprintf(messageFmt, args...))
+}
+
+func (r *inMemoryEventRecorder) Warning(reason, message string) {
+	r.Lock()
+	defer r.Unlock()
+	event := makeEvent(&inMemoryDummyObjectReference, r.source, corev1.EventTypeWarning, reason, message)
+	klog.Info(event.String())
+	r.events = append(r.events, event)
+}
+
+func (r *inMemoryEventRecorder) Warningf(reason, messageFmt string, args ...interface{}) {
+	r.Warning(reason, fmt.Sprintf(messageFmt, args...))
+}
diff --git a/vendor/github.com/openshift/library-go/pkg/operator/events/recorder_logging.go b/vendor/github.com/openshift/library-go/pkg/operator/events/recorder_logging.go
new file mode 100644
index 0000000000000000000000000000000000000000..7f3b5cd8bd02cbf61bff0ef08bef7fcae4d7901f
--- /dev/null
+++ b/vendor/github.com/openshift/library-go/pkg/operator/events/recorder_logging.go
@@ -0,0 +1,49 @@
+package events
+
+import (
+	"fmt"
+
+	corev1 "k8s.io/api/core/v1"
+	"k8s.io/klog"
+)
+
+type LoggingEventRecorder struct {
+	component string
+}
+// NewLoggingEventRecorder provides an event recorder that logs all recorded events via klog.
+func NewLoggingEventRecorder(component string) Recorder {
+	return &LoggingEventRecorder{component: component}
+}
+
+func (r *LoggingEventRecorder) ComponentName() string {
+	return r.component
+}
+
+func (r *LoggingEventRecorder) ForComponent(component string) Recorder {
+	newRecorder := *r
+	newRecorder.component = component
+	return &newRecorder
+}
+
+func (r *LoggingEventRecorder) WithComponentSuffix(suffix string) Recorder {
+	return r.ForComponent(fmt.Sprintf("%s-%s", r.ComponentName(), suffix))
+}
+
+func (r *LoggingEventRecorder) Event(reason, message string) {
+	event := makeEvent(&inMemoryDummyObjectReference, "", corev1.EventTypeNormal, reason, message)
+	klog.Info(event.String())
+}
+
+func (r *LoggingEventRecorder) Eventf(reason, messageFmt string, args ...interface{}) {
+	r.Event(reason, fmt.Sprintf(messageFmt, args...))
+}
+
+func (r *LoggingEventRecorder) Warning(reason, message string) {
+	event := makeEvent(&inMemoryDummyObjectReference, "", corev1.EventTypeWarning, reason, message)
+	klog.Warning(event.String())
+}
+
+func (r *LoggingEventRecorder) Warningf(reason, messageFmt string, args ...interface{}) {
+	r.Warning(reason, fmt.Sprintf(messageFmt, args...))
+}
diff --git a/vendor/github.com/openshift/library-go/pkg/operator/events/recorder_upstream.go b/vendor/github.com/openshift/library-go/pkg/operator/events/recorder_upstream.go
new file mode 100644
index 0000000000000000000000000000000000000000..359d2eb81eaa36d9d4e8f5ec2005079c1f2443d4
--- /dev/null
+++ b/vendor/github.com/openshift/library-go/pkg/operator/events/recorder_upstream.go
@@ -0,0 +1,70 @@
+package events
+
+import (
+	"fmt"
+
+	"k8s.io/klog"
+
+	corev1 "k8s.io/api/core/v1"
+	"k8s.io/client-go/kubernetes/scheme"
+	corev1client "k8s.io/client-go/kubernetes/typed/core/v1"
+	"k8s.io/client-go/tools/record"
+)
+
+// NewKubeRecorder returns a new event recorder backed by the upstream client-go event broadcaster.
+func NewKubeRecorder(client corev1client.EventInterface, sourceComponentName string, involvedObjectRef *corev1.ObjectReference) Recorder {
+	return (&upstreamRecorder{
+		client:            client,
+		component:         sourceComponentName,
+		involvedObjectRef: involvedObjectRef,
+	}).ForComponent(sourceComponentName)
+}
+
+// upstreamRecorder is an implementation of the Recorder interface.
+type upstreamRecorder struct {
+	client            corev1client.EventInterface
+	component         string
+	broadcaster       record.EventBroadcaster
+	eventRecorder     record.EventRecorder
+	involvedObjectRef *corev1.ObjectReference
+}
+
+func (r *upstreamRecorder) ForComponent(componentName string) Recorder {
+	newRecorderForComponent := *r
+	broadcaster := record.NewBroadcaster()
+	broadcaster.StartLogging(klog.Infof)
+	broadcaster.StartRecordingToSink(&corev1client.EventSinkImpl{Interface: newRecorderForComponent.client})
+
+	newRecorderForComponent.eventRecorder = broadcaster.NewRecorder(scheme.Scheme, corev1.EventSource{Component: componentName})
+	newRecorderForComponent.component = componentName
+
+	return &newRecorderForComponent
+}
+
+func (r *upstreamRecorder) WithComponentSuffix(suffix string) Recorder {
+	return r.ForComponent(fmt.Sprintf("%s-%s", r.ComponentName(), suffix))
+}
+
+func (r *upstreamRecorder) ComponentName() string {
+	return r.component
+}
+
+// Eventf emits a normal-type event and allows formatting of the message.
+func (r *upstreamRecorder) Eventf(reason, messageFmt string, args ...interface{}) {
+	r.Event(reason, fmt.Sprintf(messageFmt, args...))
+}
+
+// Warningf emits a warning-type event and allows formatting of the message.
+func (r *upstreamRecorder) Warningf(reason, messageFmt string, args ...interface{}) { + r.Warning(reason, fmt.Sprintf(messageFmt, args...)) +} + +// Event emits the normal type event. +func (r *upstreamRecorder) Event(reason, message string) { + r.eventRecorder.Event(r.involvedObjectRef, corev1.EventTypeNormal, reason, message) +} + +// Warning emits the warning type event. +func (r *upstreamRecorder) Warning(reason, message string) { + r.eventRecorder.Event(r.involvedObjectRef, corev1.EventTypeWarning, reason, message) +} diff --git a/vendor/github.com/openshift/library-go/pkg/operator/resource/resourceapply/apiextensions.go b/vendor/github.com/openshift/library-go/pkg/operator/resource/resourceapply/apiextensions.go new file mode 100644 index 0000000000000000000000000000000000000000..2721f8ae35f79ad8544b9c567dfe64a087beebda --- /dev/null +++ b/vendor/github.com/openshift/library-go/pkg/operator/resource/resourceapply/apiextensions.go @@ -0,0 +1,71 @@ +package resourceapply + +import ( + "github.com/openshift/library-go/pkg/operator/events" + "github.com/openshift/library-go/pkg/operator/resource/resourcemerge" + apiextensionsv1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1" + apiextv1beta1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1" + apiextclientv1 "k8s.io/apiextensions-apiserver/pkg/client/clientset/clientset/typed/apiextensions/v1" + apiextclientv1beta1 "k8s.io/apiextensions-apiserver/pkg/client/clientset/clientset/typed/apiextensions/v1beta1" + apierrors "k8s.io/apimachinery/pkg/api/errors" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/klog" +) + +// ApplyCustomResourceDefinitionV1Beta1 applies the required CustomResourceDefinition to the cluster. +func ApplyCustomResourceDefinitionV1Beta1(client apiextclientv1beta1.CustomResourceDefinitionsGetter, recorder events.Recorder, required *apiextv1beta1.CustomResourceDefinition) (*apiextv1beta1.CustomResourceDefinition, bool, error) { + existing, err := client.CustomResourceDefinitions().Get(required.Name, metav1.GetOptions{}) + if apierrors.IsNotFound(err) { + actual, err := client.CustomResourceDefinitions().Create(required) + reportCreateEvent(recorder, required, err) + return actual, true, err + } + if err != nil { + return nil, false, err + } + + modified := resourcemerge.BoolPtr(false) + existingCopy := existing.DeepCopy() + resourcemerge.EnsureCustomResourceDefinitionV1Beta1(modified, existingCopy, *required) + if !*modified { + return existing, false, nil + } + + if klog.V(4) { + klog.Infof("CustomResourceDefinition %q changes: %s", existing.Name, JSONPatchNoError(existing, existingCopy)) + } + + actual, err := client.CustomResourceDefinitions().Update(existingCopy) + reportUpdateEvent(recorder, required, err) + + return actual, true, err +} + +// ApplyCustomResourceDefinitionV1 applies the required CustomResourceDefinition to the cluster. 
+func ApplyCustomResourceDefinitionV1(client apiextclientv1.CustomResourceDefinitionsGetter, recorder events.Recorder, required *apiextensionsv1.CustomResourceDefinition) (*apiextensionsv1.CustomResourceDefinition, bool, error) { + existing, err := client.CustomResourceDefinitions().Get(required.Name, metav1.GetOptions{}) + if apierrors.IsNotFound(err) { + actual, err := client.CustomResourceDefinitions().Create(required) + reportCreateEvent(recorder, required, err) + return actual, true, err + } + if err != nil { + return nil, false, err + } + + modified := resourcemerge.BoolPtr(false) + existingCopy := existing.DeepCopy() + resourcemerge.EnsureCustomResourceDefinitionV1(modified, existingCopy, *required) + if !*modified { + return existing, false, nil + } + + if klog.V(4) { + klog.Infof("CustomResourceDefinition %q changes: %s", existing.Name, JSONPatchNoError(existing, existingCopy)) + } + + actual, err := client.CustomResourceDefinitions().Update(existingCopy) + reportUpdateEvent(recorder, required, err) + + return actual, true, err +} diff --git a/vendor/github.com/openshift/library-go/pkg/operator/resource/resourceapply/apiregistration.go b/vendor/github.com/openshift/library-go/pkg/operator/resource/resourceapply/apiregistration.go new file mode 100644 index 0000000000000000000000000000000000000000..1d69176ed1e21a85f9e3df42fe8e255597110b82 --- /dev/null +++ b/vendor/github.com/openshift/library-go/pkg/operator/resource/resourceapply/apiregistration.go @@ -0,0 +1,47 @@ +package resourceapply + +import ( + "k8s.io/apimachinery/pkg/api/equality" + apierrors "k8s.io/apimachinery/pkg/api/errors" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/klog" + apiregistrationv1 "k8s.io/kube-aggregator/pkg/apis/apiregistration/v1" + apiregistrationv1client "k8s.io/kube-aggregator/pkg/client/clientset_generated/clientset/typed/apiregistration/v1" + + "github.com/openshift/library-go/pkg/operator/events" + "github.com/openshift/library-go/pkg/operator/resource/resourcemerge" +) + +// ApplyAPIService merges objectmeta and requires apiservice coordinates. It does not touch CA bundles, which should be managed via service CA controller. 
+func ApplyAPIService(client apiregistrationv1client.APIServicesGetter, recorder events.Recorder, required *apiregistrationv1.APIService) (*apiregistrationv1.APIService, bool, error) { + existing, err := client.APIServices().Get(required.Name, metav1.GetOptions{}) + if apierrors.IsNotFound(err) { + actual, err := client.APIServices().Create(required) + reportCreateEvent(recorder, required, err) + return actual, true, err + } + if err != nil { + return nil, false, err + } + + modified := resourcemerge.BoolPtr(false) + existingCopy := existing.DeepCopy() + + resourcemerge.EnsureObjectMeta(modified, &existingCopy.ObjectMeta, required.ObjectMeta) + serviceSame := equality.Semantic.DeepEqual(existingCopy.Spec.Service, required.Spec.Service) + prioritySame := existingCopy.Spec.VersionPriority == required.Spec.VersionPriority && existingCopy.Spec.GroupPriorityMinimum == required.Spec.GroupPriorityMinimum + insecureSame := existingCopy.Spec.InsecureSkipTLSVerify == required.Spec.InsecureSkipTLSVerify + // there was no change to metadata, the service and priorities were right + if !*modified && serviceSame && prioritySame && insecureSame { + return existingCopy, false, nil + } + + existingCopy.Spec = required.Spec + + if klog.V(4) { + klog.Infof("APIService %q changes: %s", existing.Name, JSONPatchNoError(existing, existingCopy)) + } + actual, err := client.APIServices().Update(existingCopy) + reportUpdateEvent(recorder, required, err) + return actual, true, err +} diff --git a/vendor/github.com/openshift/library-go/pkg/operator/resource/resourceapply/apps.go b/vendor/github.com/openshift/library-go/pkg/operator/resource/resourceapply/apps.go new file mode 100644 index 0000000000000000000000000000000000000000..234e069055a43f26df886caa4c0427f5f7661aec --- /dev/null +++ b/vendor/github.com/openshift/library-go/pkg/operator/resource/resourceapply/apps.go @@ -0,0 +1,245 @@ +package resourceapply + +import ( + "crypto/sha256" + "encoding/json" + "fmt" + + "k8s.io/klog" + + appsv1 "k8s.io/api/apps/v1" + apierrors "k8s.io/apimachinery/pkg/api/errors" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/util/uuid" + appsclientv1 "k8s.io/client-go/kubernetes/typed/apps/v1" + + "github.com/openshift/library-go/pkg/operator/events" + "github.com/openshift/library-go/pkg/operator/resource/resourcemerge" +) + +// The Apply<type> methods in this file ensure that a resource is created or updated to match +// the form provided by the caller. +// +// If the resource does not yet exist, it will be created. +// +// If the resource exists, the metadata of the required resource will be merged with the +// existing resource and an update will be performed if the spec and metadata differ between +// the required and existing resources. To be reliable, the input of the required spec from +// the operator should be stable. It does not need to set all fields, since some fields are +// defaulted server-side. Detection of spec drift from intent by other actors is determined +// by generation, not by spec comparison. +// +// To ensure an update in response to state external to the resource spec, the caller should +// set an annotation representing that external state e.g. +// +// `myoperator.openshift.io/config-resource-version: <resourceVersion>` +// +// An update will be performed if: +// +// - The required resource metadata differs from that of the existing resource. +// - The difference will be detected by comparing the name, namespace, labels and +// annotations of the 2 resources. 
+//
+// - The generation expected by the operator differs from the generation of the existing
+//   resource.
+//   - This is the likely result of an actor other than the operator updating a resource
+//     managed by the operator.
+//
+// - The spec of the required resource differs from the spec of the existing resource.
+//   - The difference will be detected via metadata comparison since the hash of the
+//     resource's spec will be set as an annotation prior to comparison.
+
+const specHashAnnotation = "operator.openshift.io/spec-hash"
+
+// SetSpecHashAnnotation computes the hash of the provided spec and sets an annotation of the
+// hash on the provided ObjectMeta. This method is used internally by Apply<type> methods, and
+// is exposed to support testing with fake clients that need to know the mutated form of the
+// resource resulting from an Apply<type> call.
+func SetSpecHashAnnotation(objMeta *metav1.ObjectMeta, spec interface{}) error {
+	jsonBytes, err := json.Marshal(spec)
+	if err != nil {
+		return err
+	}
+	specHash := fmt.Sprintf("%x", sha256.Sum256(jsonBytes))
+	if objMeta.Annotations == nil {
+		objMeta.Annotations = map[string]string{}
+	}
+	objMeta.Annotations[specHashAnnotation] = specHash
+	return nil
+}
+
+// ApplyDeployment ensures the form of the specified deployment is present in the API. If it
+// does not exist, it will be created. If it does exist, the metadata of the required
+// deployment will be merged with the existing deployment and an update performed if the
+// deployment spec and metadata differ from the previously required spec and metadata. For
+// further detail, check the top-level comment.
+//
+// NOTE: The previous implementation of this method was renamed to
+// ApplyDeploymentWithForce. If you are reading this in response to a compile error due to the
+// change in signature, you have the following options:
+//
+// - Update the calling code to rely on the spec comparison provided by the new
+// implementation. If the code in question was specifying the force parameter to ensure
+// rollout in response to changes in resources external to the deployment, it will need to be
+// revised to set that external state as an annotation e.g.
+//
+//   myoperator.openshift.io/my-resource: <resourceVersion>
+//
+// - Update the call to use ApplyDeploymentWithForce. This is available as a temporary measure
+// but the method is deprecated and will be removed in 4.6.
+func ApplyDeployment(client appsclientv1.DeploymentsGetter, recorder events.Recorder,
+	requiredOriginal *appsv1.Deployment, expectedGeneration int64) (*appsv1.Deployment, bool, error) {
+
+	required := requiredOriginal.DeepCopy()
+	err := SetSpecHashAnnotation(&required.ObjectMeta, required.Spec)
+	if err != nil {
+		return nil, false, err
+	}
+
+	return ApplyDeploymentWithForce(client, recorder, required, expectedGeneration, false)
+}
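The annotation-driven contract described above is easiest to see in code. A hedged sketch of a caller, where the deployment manifest and the observed config's resourceVersion are supplied elsewhere in the operator; the annotation key mirrors the `myoperator.openshift.io/...` example from the comment and, like the function names, is illustrative:

```go
package example

import (
	appsv1 "k8s.io/api/apps/v1"
	"k8s.io/client-go/kubernetes"

	"github.com/openshift/library-go/pkg/operator/events"
	"github.com/openshift/library-go/pkg/operator/resource/resourceapply"
)

// syncDeployment encodes external state (the resourceVersion of an observed
// config resource) as an annotation, so the metadata comparison inside
// ApplyDeployment triggers an update whenever that external state changes.
func syncDeployment(kubeClient kubernetes.Interface, recorder events.Recorder,
	required *appsv1.Deployment, configRV string, expectedGeneration int64) error {

	if required.Annotations == nil {
		required.Annotations = map[string]string{}
	}
	required.Annotations["myoperator.openshift.io/config-resource-version"] = configRV

	_, _, err := resourceapply.ApplyDeployment(kubeClient.AppsV1(), recorder, required, expectedGeneration)
	return err
}
```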
+// ApplyDeploymentWithForce merges objectmeta and requires matching generation. It returns the final Object, whether any change was made, and an error.
+//
+// DEPRECATED - This method will be removed in 4.6 and callers will need to migrate to ApplyDeployment before then.
+func ApplyDeploymentWithForce(client appsclientv1.DeploymentsGetter, recorder events.Recorder, requiredOriginal *appsv1.Deployment, expectedGeneration int64,
+	forceRollout bool) (*appsv1.Deployment, bool, error) {
+
+	required := requiredOriginal.DeepCopy()
+	if required.Annotations == nil {
+		required.Annotations = map[string]string{}
+	}
+	if _, ok := required.Annotations[specHashAnnotation]; !ok {
+		// If the spec hash annotation is not present, the caller expects the
+		// pull-spec annotation to be applied.
+		required.Annotations["operator.openshift.io/pull-spec"] = required.Spec.Template.Spec.Containers[0].Image
+	}
+	existing, err := client.Deployments(required.Namespace).Get(required.Name, metav1.GetOptions{})
+	if apierrors.IsNotFound(err) {
+		actual, err := client.Deployments(required.Namespace).Create(required)
+		reportCreateEvent(recorder, required, err)
+		return actual, true, err
+	}
+	if err != nil {
+		return nil, false, err
+	}
+
+	modified := resourcemerge.BoolPtr(false)
+	existingCopy := existing.DeepCopy()
+
+	resourcemerge.EnsureObjectMeta(modified, &existingCopy.ObjectMeta, required.ObjectMeta)
+	// there was no change to metadata, the generation was right, and we weren't asked to force a rollout
+	if !*modified && existingCopy.ObjectMeta.Generation == expectedGeneration && !forceRollout {
+		return existingCopy, false, nil
+	}
+
+	// at this point we know that we're going to perform a write. We're just trying to get the object correct
+	toWrite := existingCopy // shallow copy so the code reads easier
+	toWrite.Spec = *required.Spec.DeepCopy()
+	if forceRollout {
+		// forces a new rollout
+		forceString := string(uuid.NewUUID())
+		if toWrite.Annotations == nil {
+			toWrite.Annotations = map[string]string{}
+		}
+		if toWrite.Spec.Template.Annotations == nil {
+			toWrite.Spec.Template.Annotations = map[string]string{}
+		}
+		toWrite.Annotations["operator.openshift.io/force"] = forceString
+		toWrite.Spec.Template.Annotations["operator.openshift.io/force"] = forceString
+	}
+
+	if klog.V(4) {
+		klog.Infof("Deployment %q changes: %v", required.Namespace+"/"+required.Name, JSONPatchNoError(existing, toWrite))
+	}
+
+	actual, err := client.Deployments(required.Namespace).Update(toWrite)
+	reportUpdateEvent(recorder, required, err)
+	return actual, true, err
+}
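For completeness, the deprecated force path looks like this; a sketch only, since new code should prefer ApplyDeployment, and the wrapper name is illustrative. When forceRollout is true, a fresh `operator.openshift.io/force` annotation is written to both the deployment and its pod template, so a new rollout starts even if the spec is otherwise identical:

```go
package example

import (
	appsv1 "k8s.io/api/apps/v1"
	appsclientv1 "k8s.io/client-go/kubernetes/typed/apps/v1"

	"github.com/openshift/library-go/pkg/operator/events"
	"github.com/openshift/library-go/pkg/operator/resource/resourceapply"
)

// redeploy unconditionally forces a rollout via the deprecated API.
func redeploy(client appsclientv1.DeploymentsGetter, recorder events.Recorder,
	required *appsv1.Deployment, expectedGeneration int64) (*appsv1.Deployment, bool, error) {

	return resourceapply.ApplyDeploymentWithForce(client, recorder, required, expectedGeneration, true)
}
```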
+// ApplyDaemonSet ensures the form of the specified daemonset is present in the API. If it
+// does not exist, it will be created. If it does exist, the metadata of the required
+// daemonset will be merged with the existing daemonset and an update performed if the
+// daemonset spec and metadata differ from the previously required spec and metadata. For
+// further detail, check the top-level comment.
+//
+// NOTE: The previous implementation of this method was renamed to ApplyDaemonSetWithForce. If
+// you are reading this in response to a compile error due to the change in signature, you have
+// the following options:
+//
+// - Update the calling code to rely on the spec comparison provided by the new
+// implementation. If the code in question was specifying the force parameter to ensure
+// rollout in response to changes in resources external to the daemonset, it will need to be
+// revised to set that external state as an annotation e.g.
+//
+//   myoperator.openshift.io/my-resource: <resourceVersion>
+//
+// - Update the call to use ApplyDaemonSetWithForce. This is available as a temporary measure
+// but the method is deprecated and will be removed in 4.6.
+func ApplyDaemonSet(client appsclientv1.DaemonSetsGetter, recorder events.Recorder,
+	requiredOriginal *appsv1.DaemonSet, expectedGeneration int64) (*appsv1.DaemonSet, bool, error) {
+
+	required := requiredOriginal.DeepCopy()
+	err := SetSpecHashAnnotation(&required.ObjectMeta, required.Spec)
+	if err != nil {
+		return nil, false, err
+	}
+
+	return ApplyDaemonSetWithForce(client, recorder, required, expectedGeneration, false)
+}
+
+// ApplyDaemonSetWithForce merges objectmeta and requires matching generation. It returns the final Object, whether any change was made, and an error.
+// DEPRECATED - This method will be removed in 4.6 and callers will need to migrate to ApplyDaemonSet before then.
+func ApplyDaemonSetWithForce(client appsclientv1.DaemonSetsGetter, recorder events.Recorder, requiredOriginal *appsv1.DaemonSet, expectedGeneration int64, forceRollout bool) (*appsv1.DaemonSet, bool, error) {
+	required := requiredOriginal.DeepCopy()
+	if required.Annotations == nil {
+		required.Annotations = map[string]string{}
+	}
+	if _, ok := required.Annotations[specHashAnnotation]; !ok {
+		// If the spec hash annotation is not present, the caller expects the
+		// pull-spec annotation to be applied.
+		required.Annotations["operator.openshift.io/pull-spec"] = required.Spec.Template.Spec.Containers[0].Image
+	}
+	existing, err := client.DaemonSets(required.Namespace).Get(required.Name, metav1.GetOptions{})
+	if apierrors.IsNotFound(err) {
+		actual, err := client.DaemonSets(required.Namespace).Create(required)
+		reportCreateEvent(recorder, required, err)
+		return actual, true, err
+	}
+	if err != nil {
+		return nil, false, err
+	}
+
+	modified := resourcemerge.BoolPtr(false)
+	existingCopy := existing.DeepCopy()
+
+	resourcemerge.EnsureObjectMeta(modified, &existingCopy.ObjectMeta, required.ObjectMeta)
+	// there was no change to metadata, the generation was right, and we weren't asked to force a rollout
+	if !*modified && existingCopy.ObjectMeta.Generation == expectedGeneration && !forceRollout {
+		return existingCopy, false, nil
+	}
+
+	// at this point we know that we're going to perform a write. We're just trying to get the object correct
+	toWrite := existingCopy // shallow copy so the code reads easier
+	toWrite.Spec = *required.Spec.DeepCopy()
+	if forceRollout {
+		// forces a new rollout
+		forceString := string(uuid.NewUUID())
+		if toWrite.Annotations == nil {
+			toWrite.Annotations = map[string]string{}
+		}
+		if toWrite.Spec.Template.Annotations == nil {
+			toWrite.Spec.Template.Annotations = map[string]string{}
+		}
+		toWrite.Annotations["operator.openshift.io/force"] = forceString
+		toWrite.Spec.Template.Annotations["operator.openshift.io/force"] = forceString
+	}
+
+	if klog.V(4) {
+		klog.Infof("DaemonSet %q changes: %v", required.Namespace+"/"+required.Name, JSONPatchNoError(existing, toWrite))
+	}
+	actual, err := client.DaemonSets(required.Namespace).Update(toWrite)
+	reportUpdateEvent(recorder, required, err)
+	return actual, true, err
+}
diff --git a/vendor/github.com/openshift/library-go/pkg/operator/resource/resourceapply/core.go b/vendor/github.com/openshift/library-go/pkg/operator/resource/resourceapply/core.go
new file mode 100644
index 0000000000000000000000000000000000000000..c3a8d467c31d1ea5d037be8a2c63e3cd9eedaf20
--- /dev/null
+++ b/vendor/github.com/openshift/library-go/pkg/operator/resource/resourceapply/core.go
@@ -0,0 +1,362 @@
+package resourceapply
+
+import (
+	"bytes"
+	"fmt"
+	"sort"
+	"strings"
+
+	"k8s.io/klog"
+
+	corev1 "k8s.io/api/core/v1"
+	"k8s.io/apimachinery/pkg/api/equality"
+	apierrors "k8s.io/apimachinery/pkg/api/errors"
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	coreclientv1 "k8s.io/client-go/kubernetes/typed/core/v1"
+
+	"github.com/openshift/library-go/pkg/operator/events"
+	"github.com/openshift/library-go/pkg/operator/resource/resourcemerge"
+)
+
+// ApplyNamespace merges objectmeta, does not worry about anything else
+func ApplyNamespace(client coreclientv1.NamespacesGetter, recorder events.Recorder, required *corev1.Namespace) (*corev1.Namespace, bool, error) {
+	existing, err := client.Namespaces().Get(required.Name, metav1.GetOptions{})
+	if apierrors.IsNotFound(err) {
+		actual, err := client.Namespaces().Create(required)
+		reportCreateEvent(recorder, required, err)
+		return actual, true, err
+	}
+	if err != nil {
+		return nil, false, err
+	}
+
+	modified := resourcemerge.BoolPtr(false)
+	existingCopy := existing.DeepCopy()
+
+	resourcemerge.EnsureObjectMeta(modified, &existingCopy.ObjectMeta, required.ObjectMeta)
+	if !*modified {
+		return existingCopy, false, nil
+	}
+
+	if klog.V(4) {
+		klog.Infof("Namespace %q changes: %v", required.Name, JSONPatchNoError(existing, existingCopy))
+	}
+
+	actual, err := client.Namespaces().Update(existingCopy)
+	reportUpdateEvent(recorder, required, err)
+	return actual, true, err
+}
+
+// ApplyService merges objectmeta and requires
+// TODO, since this cannot determine whether changes are due to legitimate actors (api server) or illegitimate ones (users), we cannot update
+// TODO I've special cased the selector for now
+func ApplyService(client coreclientv1.ServicesGetter, recorder events.Recorder, required *corev1.Service) (*corev1.Service, bool, error) {
+	existing, err := client.Services(required.Namespace).Get(required.Name, metav1.GetOptions{})
+	if apierrors.IsNotFound(err) {
+		actual, err := client.Services(required.Namespace).Create(required)
+		reportCreateEvent(recorder, required, err)
+		return actual, true, err
+	}
+	if err != nil {
+		return nil, false, err
+	}
+
+	modified := resourcemerge.BoolPtr(false)
+	existingCopy := existing.DeepCopy()
+
resourcemerge.EnsureObjectMeta(modified, &existingCopy.ObjectMeta, required.ObjectMeta) + selectorSame := equality.Semantic.DeepEqual(existingCopy.Spec.Selector, required.Spec.Selector) + + typeSame := false + requiredIsEmpty := len(required.Spec.Type) == 0 + existingCopyIsCluster := existingCopy.Spec.Type == corev1.ServiceTypeClusterIP + if (requiredIsEmpty && existingCopyIsCluster) || equality.Semantic.DeepEqual(existingCopy.Spec.Type, required.Spec.Type) { + typeSame = true + } + + if selectorSame && typeSame && !*modified { + return existingCopy, false, nil + } + + existingCopy.Spec.Selector = required.Spec.Selector + existingCopy.Spec.Type = required.Spec.Type // if this is different, the update will fail. Status will indicate it. + + if klog.V(4) { + klog.Infof("Service %q changes: %v", required.Namespace+"/"+required.Name, JSONPatchNoError(existing, required)) + } + + actual, err := client.Services(required.Namespace).Update(existingCopy) + reportUpdateEvent(recorder, required, err) + return actual, true, err +} + +// ApplyPod merges objectmeta, does not worry about anything else +func ApplyPod(client coreclientv1.PodsGetter, recorder events.Recorder, required *corev1.Pod) (*corev1.Pod, bool, error) { + existing, err := client.Pods(required.Namespace).Get(required.Name, metav1.GetOptions{}) + if apierrors.IsNotFound(err) { + actual, err := client.Pods(required.Namespace).Create(required) + reportCreateEvent(recorder, required, err) + return actual, true, err + } + if err != nil { + return nil, false, err + } + + modified := resourcemerge.BoolPtr(false) + existingCopy := existing.DeepCopy() + + resourcemerge.EnsureObjectMeta(modified, &existingCopy.ObjectMeta, required.ObjectMeta) + if !*modified { + return existingCopy, false, nil + } + + if klog.V(4) { + klog.Infof("Pod %q changes: %v", required.Namespace+"/"+required.Name, JSONPatchNoError(existing, required)) + } + + actual, err := client.Pods(required.Namespace).Update(existingCopy) + reportUpdateEvent(recorder, required, err) + return actual, true, err +} + +// ApplyServiceAccount merges objectmeta, does not worry about anything else +func ApplyServiceAccount(client coreclientv1.ServiceAccountsGetter, recorder events.Recorder, required *corev1.ServiceAccount) (*corev1.ServiceAccount, bool, error) { + existing, err := client.ServiceAccounts(required.Namespace).Get(required.Name, metav1.GetOptions{}) + if apierrors.IsNotFound(err) { + actual, err := client.ServiceAccounts(required.Namespace).Create(required) + reportCreateEvent(recorder, required, err) + return actual, true, err + } + if err != nil { + return nil, false, err + } + + modified := resourcemerge.BoolPtr(false) + existingCopy := existing.DeepCopy() + + resourcemerge.EnsureObjectMeta(modified, &existingCopy.ObjectMeta, required.ObjectMeta) + if !*modified { + return existingCopy, false, nil + } + if klog.V(4) { + klog.Infof("ServiceAccount %q changes: %v", required.Namespace+"/"+required.Name, JSONPatchNoError(existing, required)) + } + actual, err := client.ServiceAccounts(required.Namespace).Update(existingCopy) + reportUpdateEvent(recorder, required, err) + return actual, true, err +} + +// ApplyConfigMap merges objectmeta, requires data +func ApplyConfigMap(client coreclientv1.ConfigMapsGetter, recorder events.Recorder, required *corev1.ConfigMap) (*corev1.ConfigMap, bool, error) { + existing, err := client.ConfigMaps(required.Namespace).Get(required.Name, metav1.GetOptions{}) + if apierrors.IsNotFound(err) { + actual, err := 
client.ConfigMaps(required.Namespace).Create(required)
+		reportCreateEvent(recorder, required, err)
+		return actual, true, err
+	}
+	if err != nil {
+		return nil, false, err
+	}
+
+	modified := resourcemerge.BoolPtr(false)
+	existingCopy := existing.DeepCopy()
+
+	resourcemerge.EnsureObjectMeta(modified, &existingCopy.ObjectMeta, required.ObjectMeta)
+
+	caBundleInjected := required.Labels["config.openshift.io/inject-trusted-cabundle"] == "true"
+	_, newCABundleRequired := required.Data["ca-bundle.crt"]
+
+	var modifiedKeys []string
+	for existingCopyKey, existingCopyValue := range existingCopy.Data {
+		// if we're injecting a ca-bundle and the required isn't forcing the value, then don't use the value of existing
+		// to drive a diff detection. If required has set the value then we need to force the value in order to have apply
+		// behave predictably.
+		if caBundleInjected && !newCABundleRequired && existingCopyKey == "ca-bundle.crt" {
+			continue
+		}
+		if requiredValue, ok := required.Data[existingCopyKey]; !ok || (existingCopyValue != requiredValue) {
+			modifiedKeys = append(modifiedKeys, "data."+existingCopyKey)
+		}
+	}
+	for existingCopyKey, existingCopyBinValue := range existingCopy.BinaryData {
+		if requiredBinValue, ok := required.BinaryData[existingCopyKey]; !ok || !bytes.Equal(existingCopyBinValue, requiredBinValue) {
+			modifiedKeys = append(modifiedKeys, "binaryData."+existingCopyKey)
+		}
+	}
+	for requiredKey := range required.Data {
+		if _, ok := existingCopy.Data[requiredKey]; !ok {
+			modifiedKeys = append(modifiedKeys, "data."+requiredKey)
+		}
+	}
+	for requiredBinKey := range required.BinaryData {
+		if _, ok := existingCopy.BinaryData[requiredBinKey]; !ok {
+			modifiedKeys = append(modifiedKeys, "binaryData."+requiredBinKey)
+		}
+	}
+
+	dataSame := len(modifiedKeys) == 0
+	if dataSame && !*modified {
+		return existingCopy, false, nil
+	}
+	existingCopy.Data = required.Data
+	existingCopy.BinaryData = required.BinaryData
+	// if we're injecting a cabundle, and we had a previous value, and the required object isn't setting the value, then set back to the previous
+	if existingCABundle, existedBefore := existing.Data["ca-bundle.crt"]; caBundleInjected && existedBefore && !newCABundleRequired {
+		if existingCopy.Data == nil {
+			existingCopy.Data = map[string]string{}
+		}
+		existingCopy.Data["ca-bundle.crt"] = existingCABundle
+	}
+
+	actual, err := client.ConfigMaps(required.Namespace).Update(existingCopy)
+
+	var details string
+	if !dataSame {
+		sort.Strings(modifiedKeys)
+		details = fmt.Sprintf("caused by changes in %v", strings.Join(modifiedKeys, ","))
+	}
+	if klog.V(4) {
+		klog.Infof("ConfigMap %q changes: %v", required.Namespace+"/"+required.Name, JSONPatchNoError(existing, required))
+	}
+	reportUpdateEvent(recorder, required, err, details)
+	return actual, true, err
+}
+
+// ApplySecret merges objectmeta, requires data
+func ApplySecret(client coreclientv1.SecretsGetter, recorder events.Recorder, required *corev1.Secret) (*corev1.Secret, bool, error) {
+	if len(required.StringData) > 0 {
+		return nil, false, fmt.Errorf("Secret.stringData is not supported")
+	}
+
+	existing, err := client.Secrets(required.Namespace).Get(required.Name, metav1.GetOptions{})
+	if apierrors.IsNotFound(err) {
+		actual, err := client.Secrets(required.Namespace).Create(required)
+		reportCreateEvent(recorder, required, err)
+		return actual, true, err
+	}
+	if err != nil {
+		return nil, false, err
+	}
+
+	existingCopy := existing.DeepCopy()
+
resourcemerge.EnsureObjectMeta(resourcemerge.BoolPtr(false), &existingCopy.ObjectMeta, required.ObjectMeta) + + switch required.Type { + case corev1.SecretTypeServiceAccountToken: + // Secrets for ServiceAccountTokens will have data injected by kube controller manager. + // We will apply only the explicitly set keys. + if existingCopy.Data == nil { + existingCopy.Data = map[string][]byte{} + } + + for k, v := range required.Data { + existingCopy.Data[k] = v + } + + default: + existingCopy.Data = required.Data + } + + existingCopy.Type = required.Type + + if equality.Semantic.DeepEqual(existingCopy, existing) { + return existing, false, nil + } + + if klog.V(4) { + klog.Infof("Secret %s/%s changes: %v", required.Namespace, required.Name, JSONPatchSecretNoError(existing, existingCopy)) + } + actual, err := client.Secrets(required.Namespace).Update(existingCopy) + reportUpdateEvent(recorder, existingCopy, err) + + if err == nil { + return actual, true, err + } + if !strings.Contains(err.Error(), "field is immutable") { + return actual, true, err + } + + // if the field was immutable on a secret, we're going to be stuck until we delete it. Try to delete and then create + deleteErr := client.Secrets(required.Namespace).Delete(existingCopy.Name, nil) + reportDeleteEvent(recorder, existingCopy, deleteErr) + + // clear the RV and track the original actual and error for the return like our create value. + existingCopy.ResourceVersion = "" + actual, err = client.Secrets(required.Namespace).Create(existingCopy) + reportCreateEvent(recorder, existingCopy, err) + + return actual, true, err +} + +func SyncConfigMap(client coreclientv1.ConfigMapsGetter, recorder events.Recorder, sourceNamespace, sourceName, targetNamespace, targetName string, ownerRefs []metav1.OwnerReference) (*corev1.ConfigMap, bool, error) { + source, err := client.ConfigMaps(sourceNamespace).Get(sourceName, metav1.GetOptions{}) + switch { + case apierrors.IsNotFound(err): + deleteErr := client.ConfigMaps(targetNamespace).Delete(targetName, nil) + if _, getErr := client.ConfigMaps(targetNamespace).Get(targetName, metav1.GetOptions{}); getErr != nil && apierrors.IsNotFound(getErr) { + return nil, true, nil + } + if apierrors.IsNotFound(deleteErr) { + return nil, false, nil + } + if deleteErr == nil { + recorder.Eventf("TargetConfigDeleted", "Deleted target configmap %s/%s because source config does not exist", targetNamespace, targetName) + return nil, true, nil + } + return nil, false, deleteErr + case err != nil: + return nil, false, err + default: + source.Namespace = targetNamespace + source.Name = targetName + source.ResourceVersion = "" + source.OwnerReferences = ownerRefs + return ApplyConfigMap(client, recorder, source) + } +} + +func SyncSecret(client coreclientv1.SecretsGetter, recorder events.Recorder, sourceNamespace, sourceName, targetNamespace, targetName string, ownerRefs []metav1.OwnerReference) (*corev1.Secret, bool, error) { + source, err := client.Secrets(sourceNamespace).Get(sourceName, metav1.GetOptions{}) + switch { + case apierrors.IsNotFound(err): + if _, getErr := client.Secrets(targetNamespace).Get(targetName, metav1.GetOptions{}); getErr != nil && apierrors.IsNotFound(getErr) { + return nil, true, nil + } + deleteErr := client.Secrets(targetNamespace).Delete(targetName, nil) + if apierrors.IsNotFound(deleteErr) { + return nil, false, nil + } + if deleteErr == nil { + recorder.Eventf("TargetSecretDeleted", "Deleted target secret %s/%s because source config does not exist", targetNamespace, targetName) + return 
nil, true, nil + } + return nil, false, deleteErr + case err != nil: + return nil, false, err + default: + if source.Type == corev1.SecretTypeServiceAccountToken { + + // Make sure the token is already present, otherwise we have to wait before creating the target + if len(source.Data[corev1.ServiceAccountTokenKey]) == 0 { + return nil, false, fmt.Errorf("secret %s/%s doesn't have a token yet", source.Namespace, source.Name) + } + + if source.Annotations != nil { + // When syncing a service account token we have to remove the SA annotation to disable injection into copies + delete(source.Annotations, corev1.ServiceAccountNameKey) + // To make it clean, remove the dormant annotations as well + delete(source.Annotations, corev1.ServiceAccountUIDKey) + } + + // SecretTypeServiceAccountToken implies required fields and injection which we do not want in copies + source.Type = corev1.SecretTypeOpaque + } + + source.Namespace = targetNamespace + source.Name = targetName + source.ResourceVersion = "" + source.OwnerReferences = ownerRefs + return ApplySecret(client, recorder, source) + } +} diff --git a/vendor/github.com/openshift/library-go/pkg/operator/resource/resourceapply/event_helpers.go b/vendor/github.com/openshift/library-go/pkg/operator/resource/resourceapply/event_helpers.go new file mode 100644 index 0000000000000000000000000000000000000000..af598993f956357856da22f91b8802aaee5f6896 --- /dev/null +++ b/vendor/github.com/openshift/library-go/pkg/operator/resource/resourceapply/event_helpers.go @@ -0,0 +1,56 @@ +package resourceapply + +import ( + "fmt" + "strings" + + "k8s.io/apimachinery/pkg/runtime" + + openshiftapi "github.com/openshift/api" + + "github.com/openshift/library-go/pkg/operator/events" + "github.com/openshift/library-go/pkg/operator/resource/resourcehelper" +) + +var ( + openshiftScheme = runtime.NewScheme() +) + +func init() { + if err := openshiftapi.Install(openshiftScheme); err != nil { + panic(err) + } +} + +func reportCreateEvent(recorder events.Recorder, obj runtime.Object, originalErr error) { + gvk := resourcehelper.GuessObjectGroupVersionKind(obj) + if originalErr == nil { + recorder.Eventf(fmt.Sprintf("%sCreated", gvk.Kind), "Created %s because it was missing", resourcehelper.FormatResourceForCLIWithNamespace(obj)) + return + } + recorder.Warningf(fmt.Sprintf("%sCreateFailed", gvk.Kind), "Failed to create %s: %v", resourcehelper.FormatResourceForCLIWithNamespace(obj), originalErr) +} + +func reportUpdateEvent(recorder events.Recorder, obj runtime.Object, originalErr error, details ...string) { + gvk := resourcehelper.GuessObjectGroupVersionKind(obj) + switch { + case originalErr != nil: + recorder.Warningf(fmt.Sprintf("%sUpdateFailed", gvk.Kind), "Failed to update %s: %v", resourcehelper.FormatResourceForCLIWithNamespace(obj), originalErr) + case len(details) == 0: + recorder.Eventf(fmt.Sprintf("%sUpdated", gvk.Kind), "Updated %s because it changed", resourcehelper.FormatResourceForCLIWithNamespace(obj)) + default: + recorder.Eventf(fmt.Sprintf("%sUpdated", gvk.Kind), "Updated %s:\n%s", resourcehelper.FormatResourceForCLIWithNamespace(obj), strings.Join(details, "\n")) + } +} + +func reportDeleteEvent(recorder events.Recorder, obj runtime.Object, originalErr error, details ...string) { + gvk := resourcehelper.GuessObjectGroupVersionKind(obj) + switch { + case originalErr != nil: + recorder.Warningf(fmt.Sprintf("%sDeleteFailed", gvk.Kind), "Failed to delete %s: %v", resourcehelper.FormatResourceForCLIWithNamespace(obj), originalErr) + case len(details) == 0: + 
recorder.Eventf(fmt.Sprintf("%sDeleted", gvk.Kind), "Deleted %s", resourcehelper.FormatResourceForCLIWithNamespace(obj)) + default: + recorder.Eventf(fmt.Sprintf("%sDeleted", gvk.Kind), "Deleted %s:\n%s", resourcehelper.FormatResourceForCLIWithNamespace(obj), strings.Join(details, "\n")) + } +} diff --git a/vendor/github.com/openshift/library-go/pkg/operator/resource/resourceapply/generic.go b/vendor/github.com/openshift/library-go/pkg/operator/resource/resourceapply/generic.go new file mode 100644 index 0000000000000000000000000000000000000000..e0cd1ac782da4c3c4993d2462ca86cb7cf9419b7 --- /dev/null +++ b/vendor/github.com/openshift/library-go/pkg/operator/resource/resourceapply/generic.go @@ -0,0 +1,185 @@ +package resourceapply + +import ( + "fmt" + + corev1client "k8s.io/client-go/kubernetes/typed/core/v1" + + "github.com/openshift/library-go/pkg/operator/v1helpers" + + "github.com/openshift/api" + "github.com/openshift/library-go/pkg/operator/events" + corev1 "k8s.io/api/core/v1" + rbacv1 "k8s.io/api/rbac/v1" + apiextensionsv1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1" + apiextensionsv1beta1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1" + apiextensionsclient "k8s.io/apiextensions-apiserver/pkg/client/clientset/clientset" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/serializer" + utilruntime "k8s.io/apimachinery/pkg/util/runtime" + "k8s.io/client-go/kubernetes" +) + +var ( + genericScheme = runtime.NewScheme() + genericCodecs = serializer.NewCodecFactory(genericScheme) + genericCodec = genericCodecs.UniversalDeserializer() +) + +func init() { + utilruntime.Must(api.InstallKube(genericScheme)) + utilruntime.Must(apiextensionsv1beta1.AddToScheme(genericScheme)) +} + +type AssetFunc func(name string) ([]byte, error) + +type ApplyResult struct { + File string + Type string + Result runtime.Object + Changed bool + Error error +} + +type ClientHolder struct { + kubeClient kubernetes.Interface + apiExtensionsClient apiextensionsclient.Interface + kubeInformers v1helpers.KubeInformersForNamespaces +} + +func NewClientHolder() *ClientHolder { + return &ClientHolder{} +} + +func NewKubeClientHolder(client kubernetes.Interface) *ClientHolder { + return NewClientHolder().WithKubernetes(client) +} + +func (c *ClientHolder) WithKubernetes(client kubernetes.Interface) *ClientHolder { + c.kubeClient = client + return c +} + +func (c *ClientHolder) WithKubernetesInformers(kubeInformers v1helpers.KubeInformersForNamespaces) *ClientHolder { + c.kubeInformers = kubeInformers + return c +} + +func (c *ClientHolder) WithAPIExtensionsClient(client apiextensionsclient.Interface) *ClientHolder { + c.apiExtensionsClient = client + return c +} + +// ApplyDirectly applies the given manifest files to API server. +func ApplyDirectly(clients *ClientHolder, recorder events.Recorder, manifests AssetFunc, files ...string) []ApplyResult { + ret := []ApplyResult{} + + for _, file := range files { + result := ApplyResult{File: file} + objBytes, err := manifests(file) + if err != nil { + result.Error = fmt.Errorf("missing %q: %v", file, err) + ret = append(ret, result) + continue + } + requiredObj, _, err := genericCodec.Decode(objBytes, nil, nil) + if err != nil { + result.Error = fmt.Errorf("cannot decode %q: %v", file, err) + ret = append(ret, result) + continue + } + result.Type = fmt.Sprintf("%T", requiredObj) + + // NOTE: Do not add CR resources into this switch otherwise the protobuf client can cause problems. 
+ switch t := requiredObj.(type) { + case *corev1.Namespace: + if clients.kubeClient == nil { + result.Error = fmt.Errorf("missing kubeClient") + } + result.Result, result.Changed, result.Error = ApplyNamespace(clients.kubeClient.CoreV1(), recorder, t) + case *corev1.Service: + if clients.kubeClient == nil { + result.Error = fmt.Errorf("missing kubeClient") + } + result.Result, result.Changed, result.Error = ApplyService(clients.kubeClient.CoreV1(), recorder, t) + case *corev1.Pod: + if clients.kubeClient == nil { + result.Error = fmt.Errorf("missing kubeClient") + } + result.Result, result.Changed, result.Error = ApplyPod(clients.kubeClient.CoreV1(), recorder, t) + case *corev1.ServiceAccount: + if clients.kubeClient == nil { + result.Error = fmt.Errorf("missing kubeClient") + } + result.Result, result.Changed, result.Error = ApplyServiceAccount(clients.kubeClient.CoreV1(), recorder, t) + case *corev1.ConfigMap: + client := clients.configMapsGetter() + if client == nil { + result.Error = fmt.Errorf("missing kubeClient") + } + result.Result, result.Changed, result.Error = ApplyConfigMap(client, recorder, t) + case *corev1.Secret: + client := clients.secretsGetter() + if client == nil { + result.Error = fmt.Errorf("missing kubeClient") + } + result.Result, result.Changed, result.Error = ApplySecret(client, recorder, t) + case *rbacv1.ClusterRole: + if clients.kubeClient == nil { + result.Error = fmt.Errorf("missing kubeClient") + } + result.Result, result.Changed, result.Error = ApplyClusterRole(clients.kubeClient.RbacV1(), recorder, t) + case *rbacv1.ClusterRoleBinding: + if clients.kubeClient == nil { + result.Error = fmt.Errorf("missing kubeClient") + } + result.Result, result.Changed, result.Error = ApplyClusterRoleBinding(clients.kubeClient.RbacV1(), recorder, t) + case *rbacv1.Role: + if clients.kubeClient == nil { + result.Error = fmt.Errorf("missing kubeClient") + } + result.Result, result.Changed, result.Error = ApplyRole(clients.kubeClient.RbacV1(), recorder, t) + case *rbacv1.RoleBinding: + if clients.kubeClient == nil { + result.Error = fmt.Errorf("missing kubeClient") + } + result.Result, result.Changed, result.Error = ApplyRoleBinding(clients.kubeClient.RbacV1(), recorder, t) + case *apiextensionsv1beta1.CustomResourceDefinition: + if clients.apiExtensionsClient == nil { + result.Error = fmt.Errorf("missing apiExtensionsClient") + } + result.Result, result.Changed, result.Error = ApplyCustomResourceDefinitionV1Beta1(clients.apiExtensionsClient.ApiextensionsV1beta1(), recorder, t) + case *apiextensionsv1.CustomResourceDefinition: + if clients.apiExtensionsClient == nil { + result.Error = fmt.Errorf("missing apiExtensionsClient") + } + result.Result, result.Changed, result.Error = ApplyCustomResourceDefinitionV1(clients.apiExtensionsClient.ApiextensionsV1(), recorder, t) + default: + result.Error = fmt.Errorf("unhandled type %T", requiredObj) + } + + ret = append(ret, result) + } + + return ret +} + +func (c *ClientHolder) configMapsGetter() corev1client.ConfigMapsGetter { + if c.kubeClient == nil { + return nil + } + if c.kubeInformers == nil { + return c.kubeClient.CoreV1() + } + return v1helpers.CachedConfigMapGetter(c.kubeClient.CoreV1(), c.kubeInformers) +} + +func (c *ClientHolder) secretsGetter() corev1client.SecretsGetter { + if c.kubeClient == nil { + return nil + } + if c.kubeInformers == nil { + return c.kubeClient.CoreV1() + } + return v1helpers.CachedSecretGetter(c.kubeClient.CoreV1(), c.kubeInformers) +} diff --git 
a/vendor/github.com/openshift/library-go/pkg/operator/resource/resourceapply/json_patch_helpers.go b/vendor/github.com/openshift/library-go/pkg/operator/resource/resourceapply/json_patch_helpers.go
new file mode 100644
index 0000000000000000000000000000000000000000..ac9699affeacc6c597e283e16322f1a5eb9cf200
--- /dev/null
+++ b/vendor/github.com/openshift/library-go/pkg/operator/resource/resourceapply/json_patch_helpers.go
@@ -0,0 +1,70 @@
+package resourceapply
+
+import (
+	"fmt"
+
+	patch "github.com/evanphx/json-patch"
+
+	corev1 "k8s.io/api/core/v1"
+	"k8s.io/apimachinery/pkg/api/equality"
+	"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
+	"k8s.io/apimachinery/pkg/runtime"
+)
+
+// JSONPatchNoError generates a JSON patch between the original and modified objects and returns the JSON as a string.
+// Note:
+//
+// In case of error, the returned string will contain the error messages.
+func JSONPatchNoError(original, modified runtime.Object) string {
+	if original == nil {
+		return "original object is nil"
+	}
+	if modified == nil {
+		return "modified object is nil"
+	}
+	originalJSON, err := runtime.Encode(unstructured.UnstructuredJSONScheme, original)
+	if err != nil {
+		return fmt.Sprintf("unable to encode original to JSON: %v", err)
+	}
+	modifiedJSON, err := runtime.Encode(unstructured.UnstructuredJSONScheme, modified)
+	if err != nil {
+		return fmt.Sprintf("unable to encode modified to JSON: %v", err)
+	}
+	patchBytes, err := patch.CreateMergePatch(originalJSON, modifiedJSON)
+	if err != nil {
+		return fmt.Sprintf("unable to create JSON patch: %v", err)
+	}
+	return string(patchBytes)
+}
+
+// JSONPatchSecretNoError generates a JSON patch between the original and modified secrets, hiding their data,
+// and returns the JSON as a string.
+//
+// Note:
+// In case of error, the returned string will contain the error messages.
+func JSONPatchSecretNoError(original, modified *corev1.Secret) string { + if original == nil { + return "original object is nil" + } + if modified == nil { + return "modified object is nil" + } + + safeModified := modified.DeepCopy() + safeOriginal := original.DeepCopy() + + for s := range safeOriginal.Data { + safeOriginal.Data[s] = []byte("OLD") + } + for s := range safeModified.Data { + if _, preoriginal := original.Data[s]; !preoriginal { + safeModified.Data[s] = []byte("NEW") + } else if !equality.Semantic.DeepEqual(original.Data[s], safeModified.Data[s]) { + safeModified.Data[s] = []byte("MODIFIED") + } else { + safeModified.Data[s] = []byte("OLD") + } + } + + return JSONPatchNoError(safeOriginal, safeModified) +} diff --git a/vendor/github.com/openshift/library-go/pkg/operator/resource/resourceapply/monitoring.go b/vendor/github.com/openshift/library-go/pkg/operator/resource/resourceapply/monitoring.go new file mode 100644 index 0000000000000000000000000000000000000000..51cadfbff8741e6d657dba5cc6fcb3288640b35a --- /dev/null +++ b/vendor/github.com/openshift/library-go/pkg/operator/resource/resourceapply/monitoring.go @@ -0,0 +1,101 @@ +package resourceapply + +import ( + "fmt" + + "github.com/ghodss/yaml" + "github.com/imdario/mergo" + "k8s.io/klog" + + "k8s.io/apimachinery/pkg/api/equality" + "k8s.io/apimachinery/pkg/api/errors" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" + "k8s.io/client-go/dynamic" + + "github.com/openshift/library-go/pkg/operator/events" +) + +var serviceMonitorGVR = schema.GroupVersionResource{Group: "monitoring.coreos.com", Version: "v1", Resource: "servicemonitors"} + +func ensureServiceMonitorSpec(required, existing *unstructured.Unstructured) (*unstructured.Unstructured, bool, error) { + requiredSpec, _, err := unstructured.NestedMap(required.UnstructuredContent(), "spec") + if err != nil { + return nil, false, err + } + existingSpec, _, err := unstructured.NestedMap(existing.UnstructuredContent(), "spec") + if err != nil { + return nil, false, err + } + + if err := mergo.Merge(&existingSpec, &requiredSpec); err != nil { + return nil, false, err + } + + if equality.Semantic.DeepEqual(existingSpec, requiredSpec) { + return existing, false, nil + } + + existingCopy := existing.DeepCopy() + if err := unstructured.SetNestedMap(existingCopy.UnstructuredContent(), existingSpec, "spec"); err != nil { + return nil, true, err + } + + return existingCopy, true, nil +} + +// ApplyServiceMonitor applies the Prometheus service monitor. 
+func ApplyServiceMonitor(client dynamic.Interface, recorder events.Recorder, serviceMonitorBytes []byte) (bool, error) {
+	monitorJSON, err := yaml.YAMLToJSON(serviceMonitorBytes)
+	if err != nil {
+		return false, err
+	}
+
+	monitorObj, err := runtime.Decode(unstructured.UnstructuredJSONScheme, monitorJSON)
+	if err != nil {
+		return false, err
+	}
+
+	required, ok := monitorObj.(*unstructured.Unstructured)
+	if !ok {
+		return false, fmt.Errorf("unexpected object type %T", monitorObj)
+	}
+
+	namespace := required.GetNamespace()
+
+	existing, err := client.Resource(serviceMonitorGVR).Namespace(namespace).Get(required.GetName(), metav1.GetOptions{})
+	if errors.IsNotFound(err) {
+		_, createErr := client.Resource(serviceMonitorGVR).Namespace(namespace).Create(required, metav1.CreateOptions{})
+		if createErr != nil {
+			recorder.Warningf("ServiceMonitorCreateFailed", "Failed to create ServiceMonitor.monitoring.coreos.com/v1: %v", createErr)
+			return true, createErr
+		}
+		recorder.Eventf("ServiceMonitorCreated", "Created ServiceMonitor.monitoring.coreos.com/v1 because it was missing")
+		return true, nil
+	}
+
+	existingCopy := existing.DeepCopy()
+
+	updated, endpointsModified, err := ensureServiceMonitorSpec(required, existingCopy)
+	if err != nil {
+		return false, err
+	}
+
+	if !endpointsModified {
+		return false, nil
+	}
+
+	if klog.V(4) {
+		klog.Infof("ServiceMonitor %q changes: %v", namespace+"/"+required.GetName(), JSONPatchNoError(existing, existingCopy))
+	}
+
+	if _, err = client.Resource(serviceMonitorGVR).Namespace(namespace).Update(updated, metav1.UpdateOptions{}); err != nil {
+		recorder.Warningf("ServiceMonitorUpdateFailed", "Failed to update ServiceMonitor.monitoring.coreos.com/v1: %v", err)
+		return true, err
+	}
+
+	recorder.Eventf("ServiceMonitorUpdated", "Updated ServiceMonitor.monitoring.coreos.com/v1 because it changed")
+	return true, err
+}
diff --git a/vendor/github.com/openshift/library-go/pkg/operator/resource/resourceapply/rbac.go b/vendor/github.com/openshift/library-go/pkg/operator/resource/resourceapply/rbac.go
new file mode 100644
index 0000000000000000000000000000000000000000..7377cc48eb884ada5e76d55d9e13a23f67903b57
--- /dev/null
+++ b/vendor/github.com/openshift/library-go/pkg/operator/resource/resourceapply/rbac.go
@@ -0,0 +1,190 @@
+package resourceapply
+
+import (
+	"fmt"
+
+	"k8s.io/klog"
+
+	rbacv1 "k8s.io/api/rbac/v1"
+	"k8s.io/apimachinery/pkg/api/equality"
+	apierrors "k8s.io/apimachinery/pkg/api/errors"
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	rbacclientv1 "k8s.io/client-go/kubernetes/typed/rbac/v1"
+
+	"github.com/openshift/library-go/pkg/operator/events"
+	"github.com/openshift/library-go/pkg/operator/resource/resourcemerge"
+)
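For orientation, here is a minimal sketch (not part of the vendored file) of how an operator sync loop typically drives the RBAC Apply* helpers defined below; the role name, rules, and recorder wiring are illustrative assumptions:

package example

import (
	rbacv1 "k8s.io/api/rbac/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes"

	"github.com/openshift/library-go/pkg/operator/events"
	"github.com/openshift/library-go/pkg/operator/resource/resourceapply"
)

// ensureReaderRole declares the desired ClusterRole and lets the helper
// create it when missing or update it when rules or metadata drift.
func ensureReaderRole(kubeClient kubernetes.Interface, recorder events.Recorder) error {
	required := &rbacv1.ClusterRole{
		ObjectMeta: metav1.ObjectMeta{Name: "example-reader"},
		Rules: []rbacv1.PolicyRule{{
			APIGroups: []string{""},
			Resources: []string{"configmaps"},
			Verbs:     []string{"get", "list", "watch"},
		}},
	}
	_, _, err := resourceapply.ApplyClusterRole(kubeClient.RbacV1(), recorder, required)
	return err
}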
+// ApplyClusterRole merges objectmeta and requires rules; aggregation rules are not allowed for now.
+func ApplyClusterRole(client rbacclientv1.ClusterRolesGetter, recorder events.Recorder, required *rbacv1.ClusterRole) (*rbacv1.ClusterRole, bool, error) {
+	if required.AggregationRule != nil && len(required.AggregationRule.ClusterRoleSelectors) != 0 {
+		return nil, false, fmt.Errorf("cannot create an aggregated cluster role")
+	}
+
+	existing, err := client.ClusterRoles().Get(required.Name, metav1.GetOptions{})
+	if apierrors.IsNotFound(err) {
+		actual, err := client.ClusterRoles().Create(required)
+		reportCreateEvent(recorder, required, err)
+		return actual, true, err
+	}
+	if err != nil {
+		return nil, false, err
+	}
+
+	modified := resourcemerge.BoolPtr(false)
+	existingCopy := existing.DeepCopy()
+
+	resourcemerge.EnsureObjectMeta(modified, &existingCopy.ObjectMeta, required.ObjectMeta)
+	contentSame := equality.Semantic.DeepEqual(existingCopy.Rules, required.Rules)
+	if contentSame && !*modified {
+		return existingCopy, false, nil
+	}
+
+	existingCopy.Rules = required.Rules
+	existingCopy.AggregationRule = nil
+
+	if klog.V(4) {
+		klog.Infof("ClusterRole %q changes: %v", required.Name, JSONPatchNoError(existing, existingCopy))
+	}
+
+	actual, err := client.ClusterRoles().Update(existingCopy)
+	reportUpdateEvent(recorder, required, err)
+	return actual, true, err
+}
+
+// ApplyClusterRoleBinding merges objectmeta, requires subjects and role refs
+// TODO on non-matching roleref, delete and recreate
+func ApplyClusterRoleBinding(client rbacclientv1.ClusterRoleBindingsGetter, recorder events.Recorder, required *rbacv1.ClusterRoleBinding) (*rbacv1.ClusterRoleBinding, bool, error) {
+	existing, err := client.ClusterRoleBindings().Get(required.Name, metav1.GetOptions{})
+	if apierrors.IsNotFound(err) {
+		actual, err := client.ClusterRoleBindings().Create(required)
+		reportCreateEvent(recorder, required, err)
+		return actual, true, err
+	}
+	if err != nil {
+		return nil, false, err
+	}
+
+	modified := resourcemerge.BoolPtr(false)
+	existingCopy := existing.DeepCopy()
+	requiredCopy := required.DeepCopy()
+
+	// Enforce apiGroup fields in roleRefs and subjects
+	existingCopy.RoleRef.APIGroup = rbacv1.GroupName
+	for i := range existingCopy.Subjects {
+		if existingCopy.Subjects[i].Kind == "User" {
+			existingCopy.Subjects[i].APIGroup = rbacv1.GroupName
+		}
+	}
+
+	requiredCopy.RoleRef.APIGroup = rbacv1.GroupName
+	for i := range requiredCopy.Subjects {
+		// index the required copy itself; indexing existingCopy here would
+		// consult the wrong object and can run past its length
+		if requiredCopy.Subjects[i].Kind == "User" {
+			requiredCopy.Subjects[i].APIGroup = rbacv1.GroupName
+		}
+	}
+
+	resourcemerge.EnsureObjectMeta(modified, &existingCopy.ObjectMeta, requiredCopy.ObjectMeta)
+
+	subjectsAreSame := equality.Semantic.DeepEqual(existingCopy.Subjects, requiredCopy.Subjects)
+	roleRefIsSame := equality.Semantic.DeepEqual(existingCopy.RoleRef, requiredCopy.RoleRef)
+
+	if subjectsAreSame && roleRefIsSame && !*modified {
+		return existingCopy, false, nil
+	}
+
+	existingCopy.Subjects = requiredCopy.Subjects
+	existingCopy.RoleRef = requiredCopy.RoleRef
+
+	if klog.V(4) {
+		klog.Infof("ClusterRoleBinding %q changes: %v", requiredCopy.Name, JSONPatchNoError(existing, existingCopy))
+	}
+
+	actual, err := client.ClusterRoleBindings().Update(existingCopy)
+	reportUpdateEvent(recorder, requiredCopy, err)
+	return actual, true, err
+}
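Because RoleRef is immutable server-side, ApplyClusterRoleBinding's update fails when the required roleRef differs from the existing one (hence the TODO above). A hedged sketch of how a caller can work around that today, reusing the imports from the previous sketch; the Get-then-Delete flow is an assumption, not part of the library:

// ensureBinding deletes a binding whose roleRef changed so the apply below
// can recreate it rather than attempt a forbidden update. Comparing only the
// names avoids false positives from server-side APIGroup defaulting.
func ensureBinding(kubeClient kubernetes.Interface, recorder events.Recorder, required *rbacv1.ClusterRoleBinding) error {
	existing, err := kubeClient.RbacV1().ClusterRoleBindings().Get(required.Name, metav1.GetOptions{})
	if err == nil && existing.RoleRef.Name != required.RoleRef.Name {
		if err := kubeClient.RbacV1().ClusterRoleBindings().Delete(required.Name, nil); err != nil {
			return err
		}
	}
	_, _, err = resourceapply.ApplyClusterRoleBinding(kubeClient.RbacV1(), recorder, required)
	return err
}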
+// ApplyRole merges objectmeta, requires rules
+func ApplyRole(client rbacclientv1.RolesGetter, recorder events.Recorder, required *rbacv1.Role) (*rbacv1.Role, bool, error) {
+	existing, err := client.Roles(required.Namespace).Get(required.Name, metav1.GetOptions{})
+	if apierrors.IsNotFound(err) {
+		actual, err := client.Roles(required.Namespace).Create(required)
+		reportCreateEvent(recorder, required, err)
+		return actual, true, err
+	}
+	if err != nil {
+		return nil, false, err
+	}
+
+	modified := resourcemerge.BoolPtr(false)
+	existingCopy := existing.DeepCopy()
+
+	resourcemerge.EnsureObjectMeta(modified, &existingCopy.ObjectMeta, required.ObjectMeta)
+	contentSame := equality.Semantic.DeepEqual(existingCopy.Rules, required.Rules)
+	if contentSame && !*modified {
+		return existingCopy, false, nil
+	}
+
+	existingCopy.Rules = required.Rules
+
+	if klog.V(4) {
+		klog.Infof("Role %q changes: %v", required.Namespace+"/"+required.Name, JSONPatchNoError(existing, existingCopy))
+	}
+	actual, err := client.Roles(required.Namespace).Update(existingCopy)
+	reportUpdateEvent(recorder, required, err)
+	return actual, true, err
+}
+
+// ApplyRoleBinding merges objectmeta, requires subjects and role refs
+// TODO on non-matching roleref, delete and recreate
+func ApplyRoleBinding(client rbacclientv1.RoleBindingsGetter, recorder events.Recorder, required *rbacv1.RoleBinding) (*rbacv1.RoleBinding, bool, error) {
+	existing, err := client.RoleBindings(required.Namespace).Get(required.Name, metav1.GetOptions{})
+	if apierrors.IsNotFound(err) {
+		actual, err := client.RoleBindings(required.Namespace).Create(required)
+		reportCreateEvent(recorder, required, err)
+		return actual, true, err
+	}
+	if err != nil {
+		return nil, false, err
+	}
+
+	modified := resourcemerge.BoolPtr(false)
+	existingCopy := existing.DeepCopy()
+	requiredCopy := required.DeepCopy()
+
+	// Enforce apiGroup fields in roleRefs and subjects
+	existingCopy.RoleRef.APIGroup = rbacv1.GroupName
+	for i := range existingCopy.Subjects {
+		if existingCopy.Subjects[i].Kind == "User" {
+			existingCopy.Subjects[i].APIGroup = rbacv1.GroupName
+		}
+	}
+
+	requiredCopy.RoleRef.APIGroup = rbacv1.GroupName
+	for i := range requiredCopy.Subjects {
+		// as in ApplyClusterRoleBinding, index the required copy itself
+		if requiredCopy.Subjects[i].Kind == "User" {
+			requiredCopy.Subjects[i].APIGroup = rbacv1.GroupName
+		}
+	}
+
+	resourcemerge.EnsureObjectMeta(modified, &existingCopy.ObjectMeta, requiredCopy.ObjectMeta)
+
+	subjectsAreSame := equality.Semantic.DeepEqual(existingCopy.Subjects, requiredCopy.Subjects)
+	roleRefIsSame := equality.Semantic.DeepEqual(existingCopy.RoleRef, requiredCopy.RoleRef)
+
+	if subjectsAreSame && roleRefIsSame && !*modified {
+		return existingCopy, false, nil
+	}
+
+	existingCopy.Subjects = requiredCopy.Subjects
+	existingCopy.RoleRef = requiredCopy.RoleRef
+
+	if klog.V(4) {
+		klog.Infof("RoleBinding %q changes: %v", requiredCopy.Namespace+"/"+requiredCopy.Name, JSONPatchNoError(existing, existingCopy))
+	}
+
+	actual, err := client.RoleBindings(requiredCopy.Namespace).Update(existingCopy)
+	reportUpdateEvent(recorder, requiredCopy, err)
+	return actual, true, err
+}
diff --git a/vendor/github.com/openshift/library-go/pkg/operator/resource/resourceapply/storage.go b/vendor/github.com/openshift/library-go/pkg/operator/resource/resourceapply/storage.go
new file mode 100644
index 0000000000000000000000000000000000000000..0c3cd965effd0a3a22f98cc0d75cbed0e696387e
--- /dev/null
+++ b/vendor/github.com/openshift/library-go/pkg/operator/resource/resourceapply/storage.go
@@ -0,0 +1,50 @@
+package resourceapply
+
+import (
+	"k8s.io/klog"
+
+	storagev1 "k8s.io/api/storage/v1"
+	"k8s.io/apimachinery/pkg/api/equality"
+	apierrors "k8s.io/apimachinery/pkg/api/errors"
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	storageclientv1 "k8s.io/client-go/kubernetes/typed/storage/v1"
+
"github.com/openshift/library-go/pkg/operator/events" + "github.com/openshift/library-go/pkg/operator/resource/resourcemerge" +) + +// ApplyStorageClass merges objectmeta, tries to write everything else +func ApplyStorageClass(client storageclientv1.StorageClassesGetter, recorder events.Recorder, required *storagev1.StorageClass) (*storagev1.StorageClass, bool, + error) { + existing, err := client.StorageClasses().Get(required.Name, metav1.GetOptions{}) + if apierrors.IsNotFound(err) { + actual, err := client.StorageClasses().Create(required) + reportCreateEvent(recorder, required, err) + return actual, true, err + } + if err != nil { + return nil, false, err + } + + modified := resourcemerge.BoolPtr(false) + existingCopy := existing.DeepCopy() + + resourcemerge.EnsureObjectMeta(modified, &existingCopy.ObjectMeta, required.ObjectMeta) + contentSame := equality.Semantic.DeepEqual(existingCopy, required) + if contentSame && !*modified { + return existingCopy, false, nil + } + + objectMeta := existingCopy.ObjectMeta.DeepCopy() + existingCopy = required.DeepCopy() + existingCopy.ObjectMeta = *objectMeta + + if klog.V(4) { + klog.Infof("StorageClass %q changes: %v", required.Name, JSONPatchNoError(existing, existingCopy)) + } + + // TODO if provisioner, parameters, reclaimpolicy, or volumebindingmode are different, update will fail so delete and recreate + actual, err := client.StorageClasses().Update(existingCopy) + reportUpdateEvent(recorder, required, err) + return actual, true, err +} diff --git a/vendor/github.com/openshift/library-go/pkg/operator/resource/resourcehelper/resource_helpers.go b/vendor/github.com/openshift/library-go/pkg/operator/resource/resourcehelper/resource_helpers.go new file mode 100644 index 0000000000000000000000000000000000000000..43ea9111c6f27df0f71c7769ff118d0d9a058190 --- /dev/null +++ b/vendor/github.com/openshift/library-go/pkg/operator/resource/resourcehelper/resource_helpers.go @@ -0,0 +1,76 @@ +package resourcehelper + +import ( + "k8s.io/apimachinery/pkg/api/meta" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" + "k8s.io/client-go/kubernetes/scheme" + + "github.com/openshift/api" +) + +var ( + openshiftScheme = runtime.NewScheme() +) + +func init() { + if err := api.Install(openshiftScheme); err != nil { + panic(err) + } +} + +// FormatResourceForCLIWithNamespace generates a string that can be copy/pasted for use with oc get that includes +// specifying the namespace with the -n option (e.g., `ConfigMap/cluster-config-v1 -n kube-system`). +func FormatResourceForCLIWithNamespace(obj runtime.Object) string { + gvk := GuessObjectGroupVersionKind(obj) + kind := gvk.Kind + group := gvk.Group + var name, namespace string + accessor, err := meta.Accessor(obj) + if err != nil { + name = "<unknown>" + namespace = "<unknown>" + } else { + name = accessor.GetName() + namespace = accessor.GetNamespace() + } + if len(group) > 0 { + group = "." + group + } + if len(namespace) > 0 { + namespace = " -n " + namespace + } + return kind + group + "/" + name + namespace +} + +// FormatResourceForCLI generates a string that can be copy/pasted for use with oc get. +func FormatResourceForCLI(obj runtime.Object) string { + gvk := GuessObjectGroupVersionKind(obj) + kind := gvk.Kind + group := gvk.Group + var name string + accessor, err := meta.Accessor(obj) + if err != nil { + name = "<unknown>" + } else { + name = accessor.GetName() + } + if len(group) > 0 { + group = "." 
+ group
+	}
+	return kind + group + "/" + name
+}
+
+// GuessObjectGroupVersionKind returns a human-readable GroupVersionKind for the passed runtime object.
+func GuessObjectGroupVersionKind(object runtime.Object) schema.GroupVersionKind {
+	if gvk := object.GetObjectKind().GroupVersionKind(); len(gvk.Kind) > 0 {
+		return gvk
+	}
+	if kinds, _, _ := scheme.Scheme.ObjectKinds(object); len(kinds) > 0 {
+		return kinds[0]
+	}
+	if kinds, _, _ := openshiftScheme.ObjectKinds(object); len(kinds) > 0 {
+		return kinds[0]
+	}
+	return schema.GroupVersionKind{Kind: "<unknown>"}
+}
diff --git a/vendor/github.com/openshift/library-go/pkg/operator/resource/resourcemerge/apiextensions.go b/vendor/github.com/openshift/library-go/pkg/operator/resource/resourcemerge/apiextensions.go
new file mode 100644
index 0000000000000000000000000000000000000000..06e4743f47aa55ac1e0836736440cad715dc24b5
--- /dev/null
+++ b/vendor/github.com/openshift/library-go/pkg/operator/resource/resourcemerge/apiextensions.go
@@ -0,0 +1,31 @@
+package resourcemerge
+
+import (
+	apiextensionsv1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1"
+	apiextensionsv1beta1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1"
+	"k8s.io/apimachinery/pkg/api/equality"
+)
+
+// EnsureCustomResourceDefinitionV1Beta1 ensures that the existing matches the required.
+// modified is set to true when existing had to be updated with required.
+func EnsureCustomResourceDefinitionV1Beta1(modified *bool, existing *apiextensionsv1beta1.CustomResourceDefinition, required apiextensionsv1beta1.CustomResourceDefinition) {
+	EnsureObjectMeta(modified, &existing.ObjectMeta, required.ObjectMeta)
+
+	// we stomp everything
+	if !equality.Semantic.DeepEqual(existing.Spec, required.Spec) {
+		*modified = true
+		existing.Spec = required.Spec
+	}
+}
+
+// EnsureCustomResourceDefinitionV1 ensures that the existing matches the required.
+// modified is set to true when existing had to be updated with required.
+func EnsureCustomResourceDefinitionV1(modified *bool, existing *apiextensionsv1.CustomResourceDefinition, required apiextensionsv1.CustomResourceDefinition) { + EnsureObjectMeta(modified, &existing.ObjectMeta, required.ObjectMeta) + + // we stomp everything + if !equality.Semantic.DeepEqual(existing.Spec, required.Spec) { + *modified = true + existing.Spec = required.Spec + } +} diff --git a/vendor/github.com/openshift/library-go/pkg/operator/resource/resourcemerge/apps.go b/vendor/github.com/openshift/library-go/pkg/operator/resource/resourcemerge/apps.go new file mode 100644 index 0000000000000000000000000000000000000000..1731382e6884ab48359612edde10ab398e203afb --- /dev/null +++ b/vendor/github.com/openshift/library-go/pkg/operator/resource/resourcemerge/apps.go @@ -0,0 +1,80 @@ +package resourcemerge + +import ( + appsv1 "k8s.io/api/apps/v1" + "k8s.io/apimachinery/pkg/runtime/schema" + + operatorsv1 "github.com/openshift/api/operator/v1" +) + +func GenerationFor(generations []operatorsv1.GenerationStatus, resource schema.GroupResource, namespace, name string) *operatorsv1.GenerationStatus { + for i := range generations { + curr := &generations[i] + if curr.Namespace == namespace && + curr.Name == name && + curr.Group == resource.Group && + curr.Resource == resource.Resource { + + return curr + } + } + + return nil +} + +func SetGeneration(generations *[]operatorsv1.GenerationStatus, newGeneration operatorsv1.GenerationStatus) { + if generations == nil { + generations = &[]operatorsv1.GenerationStatus{} + } + + existingGeneration := GenerationFor(*generations, schema.GroupResource{Group: newGeneration.Group, Resource: newGeneration.Resource}, newGeneration.Namespace, newGeneration.Name) + if existingGeneration == nil { + *generations = append(*generations, newGeneration) + return + } + + existingGeneration.LastGeneration = newGeneration.LastGeneration + existingGeneration.Hash = newGeneration.Hash +} + +func ExpectedDeploymentGeneration(required *appsv1.Deployment, previousGenerations []operatorsv1.GenerationStatus) int64 { + generation := GenerationFor(previousGenerations, schema.GroupResource{Group: "apps", Resource: "deployments"}, required.Namespace, required.Name) + if generation != nil { + return generation.LastGeneration + } + return -1 +} + +func SetDeploymentGeneration(generations *[]operatorsv1.GenerationStatus, actual *appsv1.Deployment) { + if actual == nil { + return + } + SetGeneration(generations, operatorsv1.GenerationStatus{ + Group: "apps", + Resource: "deployments", + Namespace: actual.Namespace, + Name: actual.Name, + LastGeneration: actual.ObjectMeta.Generation, + }) +} + +func ExpectedDaemonSetGeneration(required *appsv1.DaemonSet, previousGenerations []operatorsv1.GenerationStatus) int64 { + generation := GenerationFor(previousGenerations, schema.GroupResource{Group: "apps", Resource: "daemonsets"}, required.Namespace, required.Name) + if generation != nil { + return generation.LastGeneration + } + return -1 +} + +func SetDaemonSetGeneration(generations *[]operatorsv1.GenerationStatus, actual *appsv1.DaemonSet) { + if actual == nil { + return + } + SetGeneration(generations, operatorsv1.GenerationStatus{ + Group: "apps", + Resource: "daemonsets", + Namespace: actual.Namespace, + Name: actual.Name, + LastGeneration: actual.ObjectMeta.Generation, + }) +} diff --git a/vendor/github.com/openshift/library-go/pkg/operator/resource/resourcemerge/generic_config_merger.go b/vendor/github.com/openshift/library-go/pkg/operator/resource/resourcemerge/generic_config_merger.go 
new file mode 100644
index 0000000000000000000000000000000000000000..dddb32ca44c9c238f897f6887f85a2916cd882d8
--- /dev/null
+++ b/vendor/github.com/openshift/library-go/pkg/operator/resource/resourcemerge/generic_config_merger.go
@@ -0,0 +1,271 @@
+package resourcemerge
+
+import (
+	"bytes"
+	"encoding/json"
+	"fmt"
+	"reflect"
+	"strings"
+
+	"k8s.io/klog"
+	"sigs.k8s.io/yaml"
+
+	corev1 "k8s.io/api/core/v1"
+	"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
+	"k8s.io/apimachinery/pkg/runtime"
+	kyaml "k8s.io/apimachinery/pkg/util/yaml"
+)
+
+// MergeConfigMap takes a configmap, the target key, special overlay funcs, and a list of config YAMLs to overlay on top of each other.
+// It returns the resultant configmap and a bool indicating if any changes were made to the configmap.
+func MergeConfigMap(configMap *corev1.ConfigMap, configKey string, specialCases map[string]MergeFunc, configYAMLs ...[]byte) (*corev1.ConfigMap, bool, error) {
+	return MergePrunedConfigMap(nil, configMap, configKey, specialCases, configYAMLs...)
+}
+
+// MergePrunedConfigMap takes a configmap, the target key, special overlay funcs, and a list of config YAMLs to overlay on top of each other.
+// It returns the resultant configmap and a bool indicating if any changes were made to the configmap.
+// It roundtrips the config through the given schema.
+func MergePrunedConfigMap(schema runtime.Object, configMap *corev1.ConfigMap, configKey string, specialCases map[string]MergeFunc, configYAMLs ...[]byte) (*corev1.ConfigMap, bool, error) {
+	configBytes, err := MergePrunedProcessConfig(schema, specialCases, configYAMLs...)
+	if err != nil {
+		return nil, false, err
+	}
+
+	if reflect.DeepEqual(configMap.Data[configKey], string(configBytes)) {
+		return configMap, false, nil
+	}
+
+	ret := configMap.DeepCopy()
+	ret.Data[configKey] = string(configBytes)
+
+	return ret, true, nil
+}
+
+// MergeProcessConfig merges a series of config yaml files together with each later one overlaying all previous
+func MergeProcessConfig(specialCases map[string]MergeFunc, configYAMLs ...[]byte) ([]byte, error) {
+	currentConfigYAML := configYAMLs[0]
+
+	for _, currConfigYAML := range configYAMLs[1:] {
+		prevConfigJSON, err := kyaml.ToJSON(currentConfigYAML)
+		if err != nil {
+			klog.Warning(err)
+			// maybe it's just json
+			prevConfigJSON = currentConfigYAML
+		}
+		prevConfig := map[string]interface{}{}
+		if err := json.NewDecoder(bytes.NewBuffer(prevConfigJSON)).Decode(&prevConfig); err != nil {
+			return nil, err
+		}
+
+		if len(currConfigYAML) > 0 {
+			currConfigJSON, err := kyaml.ToJSON(currConfigYAML)
+			if err != nil {
+				klog.Warning(err)
+				// maybe it's just json
+				currConfigJSON = currConfigYAML
+			}
+			currConfig := map[string]interface{}{}
+			if err := json.NewDecoder(bytes.NewBuffer(currConfigJSON)).Decode(&currConfig); err != nil {
+				return nil, err
+			}
+
+			// protect against mismatched typemeta
+			prevAPIVersion, _, _ := unstructured.NestedString(prevConfig, "apiVersion")
+			prevKind, _, _ := unstructured.NestedString(prevConfig, "kind")
+			currAPIVersion, _, _ := unstructured.NestedString(currConfig, "apiVersion")
+			currKind, _, _ := unstructured.NestedString(currConfig, "kind")
+			currGVKSet := len(currAPIVersion) > 0 || len(currKind) > 0
+			gvkMismatched := currAPIVersion != prevAPIVersion || currKind != prevKind
+			if currGVKSet && gvkMismatched {
+				return nil, fmt.Errorf("%v/%v does not equal %v/%v", currAPIVersion, currKind, prevAPIVersion, prevKind)
+			}
+
+			if err := mergeConfig(prevConfig, currConfig, "", specialCases); err != nil {
+				return nil, err
+			}
+		}
+
+		currentConfigYAML, err = runtime.Encode(unstructured.UnstructuredJSONScheme, &unstructured.Unstructured{Object: prevConfig})
+		if err != nil {
+			return nil, err
+		}
+	}
+
+	return currentConfigYAML, nil
+}
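A small illustrative sketch (not from the vendored file) of MergeProcessConfig in use: overlaying a user-supplied fragment onto an operator's default config. The YAML literals and the package wrapper are assumptions for the example.

package example

import (
	"fmt"

	"github.com/openshift/library-go/pkg/operator/resource/resourcemerge"
)

func mergeExample() error {
	defaults := []byte(`
apiVersion: example.openshift.io/v1
kind: ExampleConfig
logLevel: 2
servingInfo:
  bindAddress: 0.0.0.0:8443
`)
	override := []byte(`
logLevel: 4
`)
	// Later YAMLs win: maps merge recursively and scalars are replaced, so
	// the result keeps servingInfo but carries logLevel: 4. The merged
	// config comes back as JSON bytes.
	merged, err := resourcemerge.MergeProcessConfig(nil, defaults, override)
	if err != nil {
		return err
	}
	fmt.Println(string(merged))
	return nil
}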
err + } + } + + currentConfigYAML, err = runtime.Encode(unstructured.UnstructuredJSONScheme, &unstructured.Unstructured{Object: prevConfig}) + if err != nil { + return nil, err + } + } + + return currentConfigYAML, nil +} + +// MergePrunedProcessConfig merges a series of config yaml files together with each later one overlaying all previous. +// The result is roundtripped through the given schema if it is non-nil. +func MergePrunedProcessConfig(schema runtime.Object, specialCases map[string]MergeFunc, configYAMLs ...[]byte) ([]byte, error) { + bs, err := MergeProcessConfig(specialCases, configYAMLs...) + if err != nil { + return nil, err + } + + if schema == nil { + return bs, nil + } + + // roundtrip through the schema + typed := schema.DeepCopyObject() + if err := yaml.Unmarshal(bs, typed); err != nil { + return nil, err + } + typedBytes, err := json.Marshal(typed) + if err != nil { + return nil, err + } + var untypedJSON map[string]interface{} + if err := json.Unmarshal(typedBytes, &untypedJSON); err != nil { + return nil, err + } + + // and intersect output with input because we cannot rely on omitempty in the schema + inputBytes, err := yaml.YAMLToJSON(bs) + if err != nil { + return nil, err + } + var inputJSON map[string]interface{} + if err := json.Unmarshal(inputBytes, &inputJSON); err != nil { + return nil, err + } + return json.Marshal(intersectJSON(inputJSON, untypedJSON)) +} + +type MergeFunc func(dst, src interface{}, currentPath string) (interface{}, error) + +var _ MergeFunc = RemoveConfig + +// RemoveConfig is a merge func that elimintes an entire path from the config +func RemoveConfig(dst, src interface{}, currentPath string) (interface{}, error) { + return dst, nil +} + +// mergeConfig overwrites entries in curr by additional. It modifies curr. +func mergeConfig(curr, additional map[string]interface{}, currentPath string, specialCases map[string]MergeFunc) error { + for additionalKey, additionalVal := range additional { + fullKey := currentPath + "." + additionalKey + specialCase, ok := specialCases[fullKey] + if ok { + var err error + curr[additionalKey], err = specialCase(curr[additionalKey], additionalVal, currentPath) + if err != nil { + return err + } + continue + } + + currVal, ok := curr[additionalKey] + if !ok { + curr[additionalKey] = additionalVal + continue + } + + // only some scalars are accepted + switch castVal := additionalVal.(type) { + case map[string]interface{}: + currValAsMap, ok := currVal.(map[string]interface{}) + if !ok { + currValAsMap = map[string]interface{}{} + curr[additionalKey] = currValAsMap + } + + err := mergeConfig(currValAsMap, castVal, fullKey, specialCases) + if err != nil { + return err + } + continue + + default: + if err := unstructured.SetNestedField(curr, castVal, additionalKey); err != nil { + return err + } + } + + } + + return nil +} + +// jsonIntersection returns the intersection of both JSON object, +// preferring the values of the first argument. 
+func intersectJSON(x1, x2 map[string]interface{}) map[string]interface{} { + if x1 == nil || x2 == nil { + return nil + } + ret := map[string]interface{}{} + for k, v1 := range x1 { + v2, ok := x2[k] + if !ok { + continue + } + ret[k] = intersectValue(v1, v2) + } + return ret +} + +func intersectArray(x1, x2 []interface{}) []interface{} { + if x1 == nil || x2 == nil { + return nil + } + ret := make([]interface{}, 0, len(x1)) + for i := range x1 { + if i >= len(x2) { + break + } + ret = append(ret, intersectValue(x1[i], x2[i])) + } + return ret +} + +func intersectValue(x1, x2 interface{}) interface{} { + switch x1 := x1.(type) { + case map[string]interface{}: + x2, ok := x2.(map[string]interface{}) + if !ok { + return x1 + } + return intersectJSON(x1, x2) + case []interface{}: + x2, ok := x2.([]interface{}) + if !ok { + return x1 + } + return intersectArray(x1, x2) + default: + return x1 + } +} + +// IsRequiredConfigPresent can check an observedConfig to see if certain required paths are present in that config. +// This allows operators to require certain configuration to be observed before proceeding to honor a configuration or roll it out. +func IsRequiredConfigPresent(config []byte, requiredPaths [][]string) error { + if len(config) == 0 { + return fmt.Errorf("no observedConfig") + } + + existingConfig := map[string]interface{}{} + if err := json.NewDecoder(bytes.NewBuffer(config)).Decode(&existingConfig); err != nil { + return fmt.Errorf("error parsing config, %v", err) + } + + for _, requiredPath := range requiredPaths { + configVal, found, err := unstructured.NestedFieldNoCopy(existingConfig, requiredPath...) + if err != nil { + return fmt.Errorf("error reading %v from config, %v", strings.Join(requiredPath, "."), err) + } + if !found { + return fmt.Errorf("%v missing from config", strings.Join(requiredPath, ".")) + } + if configVal == nil { + return fmt.Errorf("%v null in config", strings.Join(requiredPath, ".")) + } + if configValSlice, ok := configVal.([]interface{}); ok && len(configValSlice) == 0 { + return fmt.Errorf("%v empty in config", strings.Join(requiredPath, ".")) + } + if configValString, ok := configVal.(string); ok && len(configValString) == 0 { + return fmt.Errorf("%v empty in config", strings.Join(requiredPath, ".")) + } + } + return nil +} diff --git a/vendor/github.com/openshift/library-go/pkg/operator/resource/resourcemerge/object_merger.go b/vendor/github.com/openshift/library-go/pkg/operator/resource/resourcemerge/object_merger.go new file mode 100644 index 0000000000000000000000000000000000000000..9d03da6e2dc1aaafe7655ac4eac427c056053c7f --- /dev/null +++ b/vendor/github.com/openshift/library-go/pkg/operator/resource/resourcemerge/object_merger.go @@ -0,0 +1,153 @@ +package resourcemerge + +import ( + "reflect" + + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +// EnsureObjectMeta writes namespace, name, labels, and annotations. Don't set other things here. +// TODO finalizer support maybe? 
+func EnsureObjectMeta(modified *bool, existing *metav1.ObjectMeta, required metav1.ObjectMeta) { + SetStringIfSet(modified, &existing.Namespace, required.Namespace) + SetStringIfSet(modified, &existing.Name, required.Name) + MergeMap(modified, &existing.Labels, required.Labels) + MergeMap(modified, &existing.Annotations, required.Annotations) +} + +func stringPtr(val string) *string { + return &val +} + +func SetString(modified *bool, existing *string, required string) { + if required != *existing { + *existing = required + *modified = true + } +} + +func SetStringIfSet(modified *bool, existing *string, required string) { + if len(required) == 0 { + return + } + if required != *existing { + *existing = required + *modified = true + } +} + +func setStringPtr(modified *bool, existing **string, required *string) { + if *existing == nil || (required == nil && *existing != nil) { + *modified = true + *existing = required + return + } + SetString(modified, *existing, *required) +} + +func SetStringSlice(modified *bool, existing *[]string, required []string) { + if !reflect.DeepEqual(required, *existing) { + *existing = required + *modified = true + } +} + +func SetStringSliceIfSet(modified *bool, existing *[]string, required []string) { + if required == nil { + return + } + if !reflect.DeepEqual(required, *existing) { + *existing = required + *modified = true + } +} + +func BoolPtr(val bool) *bool { + return &val +} + +func SetBool(modified *bool, existing *bool, required bool) { + if required != *existing { + *existing = required + *modified = true + } +} + +func setBoolPtr(modified *bool, existing **bool, required *bool) { + if *existing == nil || (required == nil && *existing != nil) { + *modified = true + *existing = required + return + } + SetBool(modified, *existing, *required) +} + +func int64Ptr(val int64) *int64 { + return &val +} + +func SetInt32(modified *bool, existing *int32, required int32) { + if required != *existing { + *existing = required + *modified = true + } +} + +func SetInt32IfSet(modified *bool, existing *int32, required int32) { + if required == 0 { + return + } + + SetInt32(modified, existing, required) +} + +func SetInt64(modified *bool, existing *int64, required int64) { + if required != *existing { + *existing = required + *modified = true + } +} + +func setInt64Ptr(modified *bool, existing **int64, required *int64) { + if *existing == nil || (required == nil && *existing != nil) { + *modified = true + *existing = required + return + } + SetInt64(modified, *existing, *required) +} + +func MergeMap(modified *bool, existing *map[string]string, required map[string]string) { + if *existing == nil { + *existing = map[string]string{} + } + for k, v := range required { + if existingV, ok := (*existing)[k]; !ok || v != existingV { + *modified = true + (*existing)[k] = v + } + } +} + +func SetMapStringString(modified *bool, existing *map[string]string, required map[string]string) { + if *existing == nil { + *existing = map[string]string{} + } + + if !reflect.DeepEqual(*existing, required) { + *existing = required + } +} + +func SetMapStringStringIfSet(modified *bool, existing *map[string]string, required map[string]string) { + if required == nil { + return + } + if *existing == nil { + *existing = map[string]string{} + } + + if !reflect.DeepEqual(*existing, required) { + *existing = required + } +} diff --git a/vendor/github.com/openshift/library-go/pkg/operator/v1helpers/core_getters.go b/vendor/github.com/openshift/library-go/pkg/operator/v1helpers/core_getters.go new 
file mode 100644 index 0000000000000000000000000000000000000000..1060ba739f5372b89971e5c34ba4564ab8087e7e --- /dev/null +++ b/vendor/github.com/openshift/library-go/pkg/operator/v1helpers/core_getters.go @@ -0,0 +1,126 @@ +package v1helpers + +import ( + "fmt" + + corev1 "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/api/equality" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/labels" + corev1client "k8s.io/client-go/kubernetes/typed/core/v1" + corev1listers "k8s.io/client-go/listers/core/v1" +) + +var ( + emptyGetOptions = metav1.GetOptions{} + emptyListOptions = metav1.ListOptions{} +) + +type combinedConfigMapGetter struct { + client corev1client.ConfigMapsGetter + listers KubeInformersForNamespaces +} + +func CachedConfigMapGetter(client corev1client.ConfigMapsGetter, listers KubeInformersForNamespaces) corev1client.ConfigMapsGetter { + return &combinedConfigMapGetter{ + client: client, + listers: listers, + } +} + +type combinedConfigMapInterface struct { + corev1client.ConfigMapInterface + lister corev1listers.ConfigMapNamespaceLister + namespace string +} + +func (g combinedConfigMapGetter) ConfigMaps(namespace string) corev1client.ConfigMapInterface { + return combinedConfigMapInterface{ + ConfigMapInterface: g.client.ConfigMaps(namespace), + lister: g.listers.InformersFor(namespace).Core().V1().ConfigMaps().Lister().ConfigMaps(namespace), + namespace: namespace, + } +} + +func (g combinedConfigMapInterface) Get(name string, options metav1.GetOptions) (*corev1.ConfigMap, error) { + if !equality.Semantic.DeepEqual(options, emptyGetOptions) { + return nil, fmt.Errorf("GetOptions are not honored by cached client: %#v", options) + } + + ret, err := g.lister.Get(name) + if err != nil { + return nil, err + } + return ret.DeepCopy(), nil +} +func (g combinedConfigMapInterface) List(options metav1.ListOptions) (*corev1.ConfigMapList, error) { + if !equality.Semantic.DeepEqual(options, emptyListOptions) { + return nil, fmt.Errorf("ListOptions are not honored by cached client: %#v", options) + } + + list, err := g.lister.List(labels.Everything()) + if err != nil { + return nil, err + } + + ret := &corev1.ConfigMapList{} + for i := range list { + ret.Items = append(ret.Items, *(list[i].DeepCopy())) + } + return ret, nil +} + +type combinedSecretGetter struct { + client corev1client.SecretsGetter + listers KubeInformersForNamespaces +} + +func CachedSecretGetter(client corev1client.SecretsGetter, listers KubeInformersForNamespaces) corev1client.SecretsGetter { + return &combinedSecretGetter{ + client: client, + listers: listers, + } +} + +type combinedSecretInterface struct { + corev1client.SecretInterface + lister corev1listers.SecretNamespaceLister + namespace string +} + +func (g combinedSecretGetter) Secrets(namespace string) corev1client.SecretInterface { + return combinedSecretInterface{ + SecretInterface: g.client.Secrets(namespace), + lister: g.listers.InformersFor(namespace).Core().V1().Secrets().Lister().Secrets(namespace), + namespace: namespace, + } +} + +func (g combinedSecretInterface) Get(name string, options metav1.GetOptions) (*corev1.Secret, error) { + if !equality.Semantic.DeepEqual(options, emptyGetOptions) { + return nil, fmt.Errorf("GetOptions are not honored by cached client: %#v", options) + } + + ret, err := g.lister.Get(name) + if err != nil { + return nil, err + } + return ret.DeepCopy(), nil +} + +func (g combinedSecretInterface) List(options metav1.ListOptions) (*corev1.SecretList, error) { + if !equality.Semantic.DeepEqual(options, 
emptyListOptions) { + return nil, fmt.Errorf("ListOptions are not honored by cached client: %#v", options) + } + + list, err := g.lister.List(labels.Everything()) + if err != nil { + return nil, err + } + + ret := &corev1.SecretList{} + for i := range list { + ret.Items = append(ret.Items, *(list[i].DeepCopy())) + } + return ret, nil +} diff --git a/vendor/github.com/openshift/library-go/pkg/operator/v1helpers/fake_informers.go b/vendor/github.com/openshift/library-go/pkg/operator/v1helpers/fake_informers.go new file mode 100644 index 0000000000000000000000000000000000000000..8933328978bd896e0dd5c42868402281a7271ad8 --- /dev/null +++ b/vendor/github.com/openshift/library-go/pkg/operator/v1helpers/fake_informers.go @@ -0,0 +1,7 @@ +package v1helpers + +import "k8s.io/client-go/informers" + +func NewFakeKubeInformersForNamespaces(informers map[string]informers.SharedInformerFactory) KubeInformersForNamespaces { + return kubeInformersForNamespaces(informers) +} diff --git a/vendor/github.com/openshift/library-go/pkg/operator/v1helpers/helpers.go b/vendor/github.com/openshift/library-go/pkg/operator/v1helpers/helpers.go new file mode 100644 index 0000000000000000000000000000000000000000..65fb3e930917c5f752e9898c7274425ba32b813b --- /dev/null +++ b/vendor/github.com/openshift/library-go/pkg/operator/v1helpers/helpers.go @@ -0,0 +1,281 @@ +package v1helpers + +import ( + "sort" + "strings" + "time" + + corev1 "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/api/equality" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" + "k8s.io/apimachinery/pkg/runtime" + utilerrors "k8s.io/apimachinery/pkg/util/errors" + "k8s.io/client-go/util/retry" + + configv1 "github.com/openshift/api/config/v1" + operatorv1 "github.com/openshift/api/operator/v1" +) + +// SetOperandVersion sets the new version and returns the previous value. 
+func SetOperandVersion(versions *[]configv1.OperandVersion, operandVersion configv1.OperandVersion) string {
+	if versions == nil {
+		versions = &[]configv1.OperandVersion{}
+	}
+	existingVersion := FindOperandVersion(*versions, operandVersion.Name)
+	if existingVersion == nil {
+		*versions = append(*versions, operandVersion)
+		return ""
+	}
+
+	previous := existingVersion.Version
+	existingVersion.Version = operandVersion.Version
+	return previous
+}
+
+func FindOperandVersion(versions []configv1.OperandVersion, name string) *configv1.OperandVersion {
+	if versions == nil {
+		return nil
+	}
+	for i := range versions {
+		if versions[i].Name == name {
+			return &versions[i]
+		}
+	}
+	return nil
+}
+
+func SetOperatorCondition(conditions *[]operatorv1.OperatorCondition, newCondition operatorv1.OperatorCondition) {
+	if conditions == nil {
+		conditions = &[]operatorv1.OperatorCondition{}
+	}
+	existingCondition := FindOperatorCondition(*conditions, newCondition.Type)
+	if existingCondition == nil {
+		newCondition.LastTransitionTime = metav1.NewTime(time.Now())
+		*conditions = append(*conditions, newCondition)
+		return
+	}
+
+	if existingCondition.Status != newCondition.Status {
+		existingCondition.Status = newCondition.Status
+		existingCondition.LastTransitionTime = metav1.NewTime(time.Now())
+	}
+
+	existingCondition.Reason = newCondition.Reason
+	existingCondition.Message = newCondition.Message
+}
+
+func RemoveOperatorCondition(conditions *[]operatorv1.OperatorCondition, conditionType string) {
+	if conditions == nil {
+		conditions = &[]operatorv1.OperatorCondition{}
+	}
+	newConditions := []operatorv1.OperatorCondition{}
+	for _, condition := range *conditions {
+		if condition.Type != conditionType {
+			newConditions = append(newConditions, condition)
+		}
+	}
+
+	*conditions = newConditions
+}
+
+func FindOperatorCondition(conditions []operatorv1.OperatorCondition, conditionType string) *operatorv1.OperatorCondition {
+	for i := range conditions {
+		if conditions[i].Type == conditionType {
+			return &conditions[i]
+		}
+	}
+
+	return nil
+}
+
+func IsOperatorConditionTrue(conditions []operatorv1.OperatorCondition, conditionType string) bool {
+	return IsOperatorConditionPresentAndEqual(conditions, conditionType, operatorv1.ConditionTrue)
+}
+
+func IsOperatorConditionFalse(conditions []operatorv1.OperatorCondition, conditionType string) bool {
+	return IsOperatorConditionPresentAndEqual(conditions, conditionType, operatorv1.ConditionFalse)
+}
+
+func IsOperatorConditionPresentAndEqual(conditions []operatorv1.OperatorCondition, conditionType string, status operatorv1.ConditionStatus) bool {
+	for _, condition := range conditions {
+		if condition.Type == conditionType {
+			return condition.Status == status
+		}
+	}
+	return false
+}
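The condition helpers above are typically used together with the Update* funcs that follow; a minimal sketch (the condition type string and function name are illustrative, not from the vendored code):

package example

import (
	operatorv1 "github.com/openshift/api/operator/v1"
	"github.com/openshift/library-go/pkg/operator/v1helpers"
)

func exampleConditions() bool {
	status := &operatorv1.OperatorStatus{}

	// Adds the condition and stamps LastTransitionTime; on later calls the
	// timestamp only changes when Status flips.
	v1helpers.SetOperatorCondition(&status.Conditions, operatorv1.OperatorCondition{
		Type:   "Available",
		Status: operatorv1.ConditionTrue,
		Reason: "AsExpected",
	})

	return v1helpers.IsOperatorConditionTrue(status.Conditions, "Available")
}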
+// UpdateOperatorSpecFunc is a func that mutates an operator spec.
+type UpdateOperatorSpecFunc func(spec *operatorv1.OperatorSpec) error
+
+// UpdateSpec applies the update funcs to the operator spec and tries to update it via the client.
+func UpdateSpec(client OperatorClient, updateFuncs ...UpdateOperatorSpecFunc) (*operatorv1.OperatorSpec, bool, error) {
+	updated := false
+	var operatorSpec *operatorv1.OperatorSpec
+	err := retry.RetryOnConflict(retry.DefaultBackoff, func() error {
+		oldSpec, _, resourceVersion, err := client.GetOperatorState()
+		if err != nil {
+			return err
+		}
+
+		newSpec := oldSpec.DeepCopy()
+		for _, update := range updateFuncs {
+			if err := update(newSpec); err != nil {
+				return err
+			}
+		}
+
+		if equality.Semantic.DeepEqual(oldSpec, newSpec) {
+			return nil
+		}
+
+		operatorSpec, _, err = client.UpdateOperatorSpec(resourceVersion, newSpec)
+		updated = err == nil
+		return err
+	})
+
+	return operatorSpec, updated, err
+}
+
+// UpdateObservedConfigFn returns a func to update the observed config.
+func UpdateObservedConfigFn(config map[string]interface{}) UpdateOperatorSpecFunc {
+	return func(oldSpec *operatorv1.OperatorSpec) error {
+		oldSpec.ObservedConfig = runtime.RawExtension{Object: &unstructured.Unstructured{Object: config}}
+		return nil
+	}
+}
+
+// UpdateStatusFunc is a func that mutates an operator status.
+type UpdateStatusFunc func(status *operatorv1.OperatorStatus) error
+
+// UpdateStatus applies the update funcs to the oldStatus and tries to update via the client.
+func UpdateStatus(client OperatorClient, updateFuncs ...UpdateStatusFunc) (*operatorv1.OperatorStatus, bool, error) {
+	updated := false
+	var updatedOperatorStatus *operatorv1.OperatorStatus
+	err := retry.RetryOnConflict(retry.DefaultBackoff, func() error {
+		_, oldStatus, resourceVersion, err := client.GetOperatorState()
+		if err != nil {
+			return err
+		}
+
+		newStatus := oldStatus.DeepCopy()
+		for _, update := range updateFuncs {
+			if err := update(newStatus); err != nil {
+				return err
+			}
+		}
+
+		if equality.Semantic.DeepEqual(oldStatus, newStatus) {
+			// We return the newStatus which is a deep copy of oldStatus but with all update funcs applied.
+			updatedOperatorStatus = newStatus
+			return nil
+		}
+
+		updatedOperatorStatus, err = client.UpdateOperatorStatus(resourceVersion, newStatus)
+		updated = err == nil
+		return err
+	})
+
+	return updatedOperatorStatus, updated, err
+}
+
+// UpdateConditionFn returns a func to update a condition.
+func UpdateConditionFn(cond operatorv1.OperatorCondition) UpdateStatusFunc {
+	return func(oldStatus *operatorv1.OperatorStatus) error {
+		SetOperatorCondition(&oldStatus.Conditions, cond)
+		return nil
+	}
+}
+
+// UpdateStaticPodStatusFunc is a func that mutates a static pod operator status.
+type UpdateStaticPodStatusFunc func(status *operatorv1.StaticPodOperatorStatus) error
+
+// UpdateStaticPodStatus applies the update funcs to the oldStatus and tries to update via the client.
+func UpdateStaticPodStatus(client StaticPodOperatorClient, updateFuncs ...UpdateStaticPodStatusFunc) (*operatorv1.StaticPodOperatorStatus, bool, error) {
+	updated := false
+	var updatedOperatorStatus *operatorv1.StaticPodOperatorStatus
+	err := retry.RetryOnConflict(retry.DefaultBackoff, func() error {
+		_, oldStatus, resourceVersion, err := client.GetStaticPodOperatorState()
+		if err != nil {
+			return err
+		}
+
+		newStatus := oldStatus.DeepCopy()
+		for _, update := range updateFuncs {
+			if err := update(newStatus); err != nil {
+				return err
+			}
+		}
+
+		if equality.Semantic.DeepEqual(oldStatus, newStatus) {
+			// We return the newStatus which is a deep copy of oldStatus but with all update funcs applied.
+ updatedOperatorStatus = newStatus + return nil + } + + updatedOperatorStatus, err = client.UpdateStaticPodOperatorStatus(resourceVersion, newStatus) + updated = err == nil + return err + }) + + return updatedOperatorStatus, updated, err +} + +// UpdateStaticPodConditionFn returns a func to update a condition. +func UpdateStaticPodConditionFn(cond operatorv1.OperatorCondition) UpdateStaticPodStatusFunc { + return func(oldStatus *operatorv1.StaticPodOperatorStatus) error { + SetOperatorCondition(&oldStatus.Conditions, cond) + return nil + } +} + +type aggregate []error + +var _ utilerrors.Aggregate = aggregate{} + +// NewMultiLineAggregate returns an aggregate error with multi-line output +func NewMultiLineAggregate(errList []error) error { + var errs []error + for _, e := range errList { + if e != nil { + errs = append(errs, e) + } + } + if len(errs) == 0 { + return nil + } + return aggregate(errs) +} + +// Error is part of the error interface. +func (agg aggregate) Error() string { + msgs := make([]string, len(agg)) + for i := range agg { + msgs[i] = agg[i].Error() + } + return strings.Join(msgs, "\n") +} + +// Errors is part of the Aggregate interface. +func (agg aggregate) Errors() []error { + return []error(agg) +} + +// MapToEnvVars converts a string-string map to a slice of corev1.EnvVar-s +func MapToEnvVars(mapEnvVars map[string]string) []corev1.EnvVar { + if mapEnvVars == nil { + return nil + } + + envVars := make([]corev1.EnvVar, len(mapEnvVars)) + i := 0 + for k, v := range mapEnvVars { + envVars[i] = corev1.EnvVar{Name: k, Value: v} + i++ + } + + // need to sort the slice so that kube-controller-manager-pod configmap does not change all the time + sort.Slice(envVars, func(i, j int) bool { return envVars[i].Name < envVars[j].Name }) + return envVars +} diff --git a/vendor/github.com/openshift/library-go/pkg/operator/v1helpers/informers.go b/vendor/github.com/openshift/library-go/pkg/operator/v1helpers/informers.go new file mode 100644 index 0000000000000000000000000000000000000000..8a3636b334bf7037f971438145fc8c17166175b1 --- /dev/null +++ b/vendor/github.com/openshift/library-go/pkg/operator/v1helpers/informers.go @@ -0,0 +1,105 @@ +package v1helpers + +import ( + "fmt" + "time" + + corev1 "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/labels" + "k8s.io/apimachinery/pkg/util/sets" + "k8s.io/client-go/informers" + "k8s.io/client-go/kubernetes" + corev1listers "k8s.io/client-go/listers/core/v1" +) + +// KubeInformersForNamespaces is a simple way to combine several shared informers into a single struct with unified listing power +type KubeInformersForNamespaces interface { + Start(stopCh <-chan struct{}) + InformersFor(namespace string) informers.SharedInformerFactory + Namespaces() sets.String + + ConfigMapLister() corev1listers.ConfigMapLister + SecretLister() corev1listers.SecretLister +} + +func NewKubeInformersForNamespaces(kubeClient kubernetes.Interface, namespaces ...string) KubeInformersForNamespaces { + ret := kubeInformersForNamespaces{} + for _, namespace := range namespaces { + if len(namespace) == 0 { + ret[""] = informers.NewSharedInformerFactory(kubeClient, 10*time.Minute) + continue + } + ret[namespace] = informers.NewSharedInformerFactoryWithOptions(kubeClient, 10*time.Minute, informers.WithNamespace(namespace)) + } + + return ret +} + +type kubeInformersForNamespaces map[string]informers.SharedInformerFactory + +func (i kubeInformersForNamespaces) Start(stopCh <-chan struct{}) { + for _, informer := range i { + informer.Start(stopCh) + } +} + +func (i 
kubeInformersForNamespaces) Namespaces() sets.String { + return sets.StringKeySet(i) +} +func (i kubeInformersForNamespaces) InformersFor(namespace string) informers.SharedInformerFactory { + return i[namespace] +} + +func (i kubeInformersForNamespaces) HasInformersFor(namespace string) bool { + return i.InformersFor(namespace) != nil +} + +type configMapLister kubeInformersForNamespaces + +func (i kubeInformersForNamespaces) ConfigMapLister() corev1listers.ConfigMapLister { + return configMapLister(i) +} + +func (l configMapLister) List(selector labels.Selector) (ret []*corev1.ConfigMap, err error) { + globalInformer, ok := l[""] + if !ok { + return nil, fmt.Errorf("combinedLister does not support cross namespace list") + } + + return globalInformer.Core().V1().ConfigMaps().Lister().List(selector) +} + +func (l configMapLister) ConfigMaps(namespace string) corev1listers.ConfigMapNamespaceLister { + informer, ok := l[namespace] + if !ok { + // coding error + panic(fmt.Sprintf("namespace %q is missing", namespace)) + } + + return informer.Core().V1().ConfigMaps().Lister().ConfigMaps(namespace) +} + +type secretLister kubeInformersForNamespaces + +func (i kubeInformersForNamespaces) SecretLister() corev1listers.SecretLister { + return secretLister(i) +} + +func (l secretLister) List(selector labels.Selector) (ret []*corev1.Secret, err error) { + globalInformer, ok := l[""] + if !ok { + return nil, fmt.Errorf("combinedLister does not support cross namespace list") + } + + return globalInformer.Core().V1().Secrets().Lister().List(selector) +} + +func (l secretLister) Secrets(namespace string) corev1listers.SecretNamespaceLister { + informer, ok := l[namespace] + if !ok { + // coding error + panic(fmt.Sprintf("namespace %q is missing", namespace)) + } + + return informer.Core().V1().Secrets().Lister().Secrets(namespace) +} diff --git a/vendor/github.com/openshift/library-go/pkg/operator/v1helpers/interfaces.go b/vendor/github.com/openshift/library-go/pkg/operator/v1helpers/interfaces.go new file mode 100644 index 0000000000000000000000000000000000000000..4afb23a61215f3f48f0044cafe446fa766d623fa --- /dev/null +++ b/vendor/github.com/openshift/library-go/pkg/operator/v1helpers/interfaces.go @@ -0,0 +1,30 @@ +package v1helpers + +import ( + operatorv1 "github.com/openshift/api/operator/v1" + "k8s.io/client-go/tools/cache" +) + +type OperatorClient interface { + Informer() cache.SharedIndexInformer + // GetOperatorState returns the operator spec, status and the resource version, potentially from a lister. + GetOperatorState() (spec *operatorv1.OperatorSpec, status *operatorv1.OperatorStatus, resourceVersion string, err error) + // UpdateOperatorSpec updates the spec of the operator, assuming the given resource version. + UpdateOperatorSpec(oldResourceVersion string, in *operatorv1.OperatorSpec) (out *operatorv1.OperatorSpec, newResourceVersion string, err error) + // UpdateOperatorStatus updates the status of the operator, assuming the given resource version. + UpdateOperatorStatus(oldResourceVersion string, in *operatorv1.OperatorStatus) (out *operatorv1.OperatorStatus, err error) +} + +type StaticPodOperatorClient interface { + OperatorClient + // GetStaticPodOperatorState returns the static pod operator spec, status and the resource version, + // potentially from a lister. 
+ GetStaticPodOperatorState() (spec *operatorv1.StaticPodOperatorSpec, status *operatorv1.StaticPodOperatorStatus, resourceVersion string, err error) + // GetStaticPodOperatorStateWithQuorum return the static pod operator spec, status and resource version + // directly from a server read. + GetStaticPodOperatorStateWithQuorum() (spec *operatorv1.StaticPodOperatorSpec, status *operatorv1.StaticPodOperatorStatus, resourceVersion string, err error) + // UpdateStaticPodOperatorStatus updates the status, assuming the given resource version. + UpdateStaticPodOperatorStatus(resourceVersion string, in *operatorv1.StaticPodOperatorStatus) (out *operatorv1.StaticPodOperatorStatus, err error) + // UpdateStaticPodOperatorSpec updates the spec, assuming the given resource version. + UpdateStaticPodOperatorSpec(resourceVersion string, in *operatorv1.StaticPodOperatorSpec) (out *operatorv1.StaticPodOperatorSpec, newResourceVersion string, err error) +} diff --git a/vendor/github.com/openshift/library-go/pkg/operator/v1helpers/test_helpers.go b/vendor/github.com/openshift/library-go/pkg/operator/v1helpers/test_helpers.go new file mode 100644 index 0000000000000000000000000000000000000000..dc336276e041c14e8b768682b753b296efa63928 --- /dev/null +++ b/vendor/github.com/openshift/library-go/pkg/operator/v1helpers/test_helpers.go @@ -0,0 +1,230 @@ +package v1helpers + +import ( + "fmt" + "strconv" + "time" + + corev1 "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/api/errors" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/labels" + "k8s.io/apimachinery/pkg/runtime/schema" + "k8s.io/client-go/kubernetes" + corev1listers "k8s.io/client-go/listers/core/v1" + "k8s.io/client-go/tools/cache" + + operatorv1 "github.com/openshift/api/operator/v1" +) + +// NewFakeSharedIndexInformer returns a fake shared index informer, suitable to use in static pod controller unit tests. +func NewFakeSharedIndexInformer() cache.SharedIndexInformer { + return &fakeSharedIndexInformer{} +} + +type fakeSharedIndexInformer struct{} + +func (fakeSharedIndexInformer) AddEventHandler(handler cache.ResourceEventHandler) { +} + +func (fakeSharedIndexInformer) AddEventHandlerWithResyncPeriod(handler cache.ResourceEventHandler, resyncPeriod time.Duration) { +} + +func (fakeSharedIndexInformer) GetStore() cache.Store { + panic("implement me") +} + +func (fakeSharedIndexInformer) GetController() cache.Controller { + panic("implement me") +} + +func (fakeSharedIndexInformer) Run(stopCh <-chan struct{}) { + panic("implement me") +} + +func (fakeSharedIndexInformer) HasSynced() bool { + panic("implement me") +} + +func (fakeSharedIndexInformer) LastSyncResourceVersion() string { + panic("implement me") +} + +func (fakeSharedIndexInformer) AddIndexers(indexers cache.Indexers) error { + panic("implement me") +} + +func (fakeSharedIndexInformer) GetIndexer() cache.Indexer { + panic("implement me") +} + +// NewFakeStaticPodOperatorClient returns a fake operator client suitable to use in static pod controller unit tests. 
+func NewFakeStaticPodOperatorClient( + staticPodSpec *operatorv1.StaticPodOperatorSpec, staticPodStatus *operatorv1.StaticPodOperatorStatus, + triggerStatusErr func(rv string, status *operatorv1.StaticPodOperatorStatus) error, + triggerSpecErr func(rv string, spec *operatorv1.StaticPodOperatorSpec) error) StaticPodOperatorClient { + return &fakeStaticPodOperatorClient{ + fakeStaticPodOperatorSpec: staticPodSpec, + fakeStaticPodOperatorStatus: staticPodStatus, + resourceVersion: "0", + triggerStatusUpdateError: triggerStatusErr, + triggerSpecUpdateError: triggerSpecErr, + } +} + +type fakeStaticPodOperatorClient struct { + fakeStaticPodOperatorSpec *operatorv1.StaticPodOperatorSpec + fakeStaticPodOperatorStatus *operatorv1.StaticPodOperatorStatus + resourceVersion string + triggerStatusUpdateError func(rv string, status *operatorv1.StaticPodOperatorStatus) error + triggerSpecUpdateError func(rv string, status *operatorv1.StaticPodOperatorSpec) error +} + +func (c *fakeStaticPodOperatorClient) Informer() cache.SharedIndexInformer { + return &fakeSharedIndexInformer{} +} + +func (c *fakeStaticPodOperatorClient) GetStaticPodOperatorState() (*operatorv1.StaticPodOperatorSpec, *operatorv1.StaticPodOperatorStatus, string, error) { + return c.fakeStaticPodOperatorSpec, c.fakeStaticPodOperatorStatus, c.resourceVersion, nil +} + +func (c *fakeStaticPodOperatorClient) GetStaticPodOperatorStateWithQuorum() (*operatorv1.StaticPodOperatorSpec, *operatorv1.StaticPodOperatorStatus, string, error) { + return c.fakeStaticPodOperatorSpec, c.fakeStaticPodOperatorStatus, c.resourceVersion, nil +} + +func (c *fakeStaticPodOperatorClient) UpdateStaticPodOperatorStatus(resourceVersion string, status *operatorv1.StaticPodOperatorStatus) (*operatorv1.StaticPodOperatorStatus, error) { + if c.resourceVersion != resourceVersion { + return nil, errors.NewConflict(schema.GroupResource{Group: operatorv1.GroupName, Resource: "TestOperatorConfig"}, "instance", fmt.Errorf("invalid resourceVersion")) + } + rv, err := strconv.Atoi(resourceVersion) + if err != nil { + return nil, err + } + c.resourceVersion = strconv.Itoa(rv + 1) + if c.triggerStatusUpdateError != nil { + if err := c.triggerStatusUpdateError(resourceVersion, status); err != nil { + return nil, err + } + } + c.fakeStaticPodOperatorStatus = status + return c.fakeStaticPodOperatorStatus, nil +} + +func (c *fakeStaticPodOperatorClient) UpdateStaticPodOperatorSpec(resourceVersion string, spec *operatorv1.StaticPodOperatorSpec) (*operatorv1.StaticPodOperatorSpec, string, error) { + if c.resourceVersion != resourceVersion { + return nil, "", errors.NewConflict(schema.GroupResource{Group: operatorv1.GroupName, Resource: "TestOperatorConfig"}, "instance", fmt.Errorf("invalid resourceVersion")) + } + rv, err := strconv.Atoi(resourceVersion) + if err != nil { + return nil, "", err + } + c.resourceVersion = strconv.Itoa(rv + 1) + if c.triggerSpecUpdateError != nil { + if err := c.triggerSpecUpdateError(resourceVersion, spec); err != nil { + return nil, "", err + } + } + c.fakeStaticPodOperatorSpec = spec + return c.fakeStaticPodOperatorSpec, c.resourceVersion, nil +} + +func (c *fakeStaticPodOperatorClient) GetOperatorState() (*operatorv1.OperatorSpec, *operatorv1.OperatorStatus, string, error) { + return &c.fakeStaticPodOperatorSpec.OperatorSpec, &c.fakeStaticPodOperatorStatus.OperatorStatus, c.resourceVersion, nil +} +func (c *fakeStaticPodOperatorClient) UpdateOperatorSpec(string, *operatorv1.OperatorSpec) (spec *operatorv1.OperatorSpec, resourceVersion string, err 
error) { + panic("not supported") +} +func (c *fakeStaticPodOperatorClient) UpdateOperatorStatus(resourceVersion string, status *operatorv1.OperatorStatus) (*operatorv1.OperatorStatus, error) { + if c.resourceVersion != resourceVersion { + return nil, errors.NewConflict(schema.GroupResource{Group: operatorv1.GroupName, Resource: "TestOperatorConfig"}, "instance", fmt.Errorf("invalid resourceVersion")) + } + rv, err := strconv.Atoi(resourceVersion) + if err != nil { + return nil, err + } + c.resourceVersion = strconv.Itoa(rv + 1) + if c.triggerStatusUpdateError != nil { + staticPodStatus := c.fakeStaticPodOperatorStatus.DeepCopy() + staticPodStatus.OperatorStatus = *status + if err := c.triggerStatusUpdateError(resourceVersion, staticPodStatus); err != nil { + return nil, err + } + } + c.fakeStaticPodOperatorStatus.OperatorStatus = *status + return &c.fakeStaticPodOperatorStatus.OperatorStatus, nil +} + +// NewFakeNodeLister returns a fake node lister suitable to use in node controller unit tests. +func NewFakeNodeLister(client kubernetes.Interface) corev1listers.NodeLister { + return &fakeNodeLister{client: client} +} + +type fakeNodeLister struct { + client kubernetes.Interface +} + +func (n *fakeNodeLister) List(selector labels.Selector) ([]*corev1.Node, error) { + nodes, err := n.client.CoreV1().Nodes().List(metav1.ListOptions{LabelSelector: selector.String()}) + if err != nil { + return nil, err + } + ret := []*corev1.Node{} + for i := range nodes.Items { + ret = append(ret, &nodes.Items[i]) + } + return ret, nil +} + +func (n *fakeNodeLister) Get(name string) (*corev1.Node, error) { + panic("implement me") +} + +func (n *fakeNodeLister) ListWithPredicate(predicate corev1listers.NodeConditionPredicate) ([]*corev1.Node, error) { + panic("implement me") +} + +// NewFakeOperatorClient returns a fake operator client suitable to use in static pod controller unit tests.
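+// +// Illustrative sketch: like a real API server, the fake rejects an update whose resourceVersion does not match its current one, which lets tests exercise optimistic-concurrency conflict paths (values hypothetical): +// +//	client := NewFakeOperatorClient(&operatorv1.OperatorSpec{}, &operatorv1.OperatorStatus{}, nil) +//	_, _, rv, _ := client.GetOperatorState() // rv == "0" +//	_, err := client.UpdateOperatorStatus("42", &operatorv1.OperatorStatus{}) +//	// errors.IsConflict(err) == true, because "42" != rv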
+func NewFakeOperatorClient(spec *operatorv1.OperatorSpec, status *operatorv1.OperatorStatus, triggerErr func(rv string, status *operatorv1.OperatorStatus) error) OperatorClient { + return &fakeOperatorClient{ + fakeOperatorSpec: spec, + fakeOperatorStatus: status, + resourceVersion: "0", + triggerStatusUpdateError: triggerErr, + } +} + +type fakeOperatorClient struct { + fakeOperatorSpec *operatorv1.OperatorSpec + fakeOperatorStatus *operatorv1.OperatorStatus + resourceVersion string + triggerStatusUpdateError func(rv string, status *operatorv1.OperatorStatus) error +} + +func (c *fakeOperatorClient) Informer() cache.SharedIndexInformer { + return &fakeSharedIndexInformer{} +} + +func (c *fakeOperatorClient) GetOperatorState() (*operatorv1.OperatorSpec, *operatorv1.OperatorStatus, string, error) { + return c.fakeOperatorSpec, c.fakeOperatorStatus, c.resourceVersion, nil +} + +func (c *fakeOperatorClient) UpdateOperatorStatus(resourceVersion string, status *operatorv1.OperatorStatus) (*operatorv1.OperatorStatus, error) { + if c.resourceVersion != resourceVersion { + return nil, errors.NewConflict(schema.GroupResource{Group: operatorv1.GroupName, Resource: "TestOperatorConfig"}, "instance", fmt.Errorf("invalid resourceVersion")) + } + rv, err := strconv.Atoi(resourceVersion) + if err != nil { + return nil, err + } + c.resourceVersion = strconv.Itoa(rv + 1) + if c.triggerStatusUpdateError != nil { + if err := c.triggerStatusUpdateError(resourceVersion, status); err != nil { + return nil, err + } + } + c.fakeOperatorStatus = status + return c.fakeOperatorStatus, nil +} +func (c *fakeOperatorClient) UpdateOperatorSpec(string, *operatorv1.OperatorSpec) (spec *operatorv1.OperatorSpec, resourceVersion string, err error) { + panic("not supported") +} diff --git a/vendor/golang.org/x/net/html/node.go b/vendor/golang.org/x/net/html/node.go index 633ee15dc55b53ebbabfc73870c8e66a1c1b0e29..1350eef22c3ce3846e92656abce8dfa979adbde0 100644 --- a/vendor/golang.org/x/net/html/node.go +++ b/vendor/golang.org/x/net/html/node.go @@ -18,6 +18,11 @@ const ( ElementNode CommentNode DoctypeNode + // RawNode nodes are not returned by the parser, but can be part of the + // Node tree passed to func Render to insert raw HTML (without escaping). + // If so, this package makes no guarantee that the rendered HTML is secure + // (from e.g. Cross Site Scripting attacks) or well-formed. + RawNode scopeMarkerNode ) diff --git a/vendor/golang.org/x/net/html/render.go b/vendor/golang.org/x/net/html/render.go index 8bf47ede7978958e0c8018728978908d0a1d5680..46879c08325ef50afb3520a1d8d039752f843a69 100644 --- a/vendor/golang.org/x/net/html/render.go +++ b/vendor/golang.org/x/net/html/render.go @@ -134,6 +134,9 @@ func render1(w writer, n *Node) error { } } return w.WriteByte('>') + case RawNode: + _, err := w.WriteString(n.Data) + return err default: return errors.New("html: unknown node type") } diff --git a/vendor/golang.org/x/net/http2/transport.go b/vendor/golang.org/x/net/http2/transport.go index 8e0059db668122a889c7cd350f69bb4d956b8259..d948e96eec203cf243f6062512a8fadaceb3e6f6 100644 --- a/vendor/golang.org/x/net/http2/transport.go +++ b/vendor/golang.org/x/net/http2/transport.go @@ -93,7 +93,7 @@ type Transport struct { // send in the initial settings frame. It is how many bytes // of response headers are allowed. Unlike the http2 spec, zero here // means to use a default limit (currently 10MB). 
If you actually - // want to advertise an ulimited value to the peer, Transport + // want to advertise an unlimited value to the peer, Transport // interprets the highest possible value here (0xffffffff or 1<<32-1) // to mean no limit. MaxHeaderListSize uint32 diff --git a/vendor/k8s.io/apiextensions-apiserver/pkg/client/listers/apiextensions/v1beta1/expansion_generated.go b/vendor/k8s.io/api/imagepolicy/v1alpha1/doc.go similarity index 66% rename from vendor/k8s.io/apiextensions-apiserver/pkg/client/listers/apiextensions/v1beta1/expansion_generated.go rename to vendor/k8s.io/api/imagepolicy/v1alpha1/doc.go index 429782deb0a3107db9a9229c2973de64b6775aea..5db6d52d473c5386a067489acea853991b3f9c6e 100644 --- a/vendor/k8s.io/apiextensions-apiserver/pkg/client/listers/apiextensions/v1beta1/expansion_generated.go +++ b/vendor/k8s.io/api/imagepolicy/v1alpha1/doc.go @@ -1,5 +1,5 @@ /* -Copyright The Kubernetes Authors. +Copyright 2016 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -14,10 +14,10 @@ See the License for the specific language governing permissions and limitations under the License. */ -// Code generated by lister-gen. DO NOT EDIT. +// +k8s:deepcopy-gen=package +// +k8s:protobuf-gen=package +// +k8s:openapi-gen=true -package v1beta1 +// +groupName=imagepolicy.k8s.io -// CustomResourceDefinitionListerExpansion allows custom methods to be added to -// CustomResourceDefinitionLister. -type CustomResourceDefinitionListerExpansion interface{} +package v1alpha1 // import "k8s.io/api/imagepolicy/v1alpha1" diff --git a/vendor/k8s.io/api/imagepolicy/v1alpha1/generated.pb.go b/vendor/k8s.io/api/imagepolicy/v1alpha1/generated.pb.go new file mode 100644 index 0000000000000000000000000000000000000000..912a93ecb0e613a472850dc537e4e8d8bfd5586b --- /dev/null +++ b/vendor/k8s.io/api/imagepolicy/v1alpha1/generated.pb.go @@ -0,0 +1,1412 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by protoc-gen-gogo. DO NOT EDIT. +// source: k8s.io/kubernetes/vendor/k8s.io/api/imagepolicy/v1alpha1/generated.proto + +package v1alpha1 + +import ( + fmt "fmt" + + io "io" + + proto "github.com/gogo/protobuf/proto" + github_com_gogo_protobuf_sortkeys "github.com/gogo/protobuf/sortkeys" + + math "math" + math_bits "math/bits" + reflect "reflect" + strings "strings" +) + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. 
+const _ = proto.GoGoProtoPackageIsVersion2 // please upgrade the proto package + +func (m *ImageReview) Reset() { *m = ImageReview{} } +func (*ImageReview) ProtoMessage() {} +func (*ImageReview) Descriptor() ([]byte, []int) { + return fileDescriptor_834793af728657a5, []int{0} +} +func (m *ImageReview) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ImageReview) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *ImageReview) XXX_Merge(src proto.Message) { + xxx_messageInfo_ImageReview.Merge(m, src) +} +func (m *ImageReview) XXX_Size() int { + return m.Size() +} +func (m *ImageReview) XXX_DiscardUnknown() { + xxx_messageInfo_ImageReview.DiscardUnknown(m) +} + +var xxx_messageInfo_ImageReview proto.InternalMessageInfo + +func (m *ImageReviewContainerSpec) Reset() { *m = ImageReviewContainerSpec{} } +func (*ImageReviewContainerSpec) ProtoMessage() {} +func (*ImageReviewContainerSpec) Descriptor() ([]byte, []int) { + return fileDescriptor_834793af728657a5, []int{1} +} +func (m *ImageReviewContainerSpec) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ImageReviewContainerSpec) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *ImageReviewContainerSpec) XXX_Merge(src proto.Message) { + xxx_messageInfo_ImageReviewContainerSpec.Merge(m, src) +} +func (m *ImageReviewContainerSpec) XXX_Size() int { + return m.Size() +} +func (m *ImageReviewContainerSpec) XXX_DiscardUnknown() { + xxx_messageInfo_ImageReviewContainerSpec.DiscardUnknown(m) +} + +var xxx_messageInfo_ImageReviewContainerSpec proto.InternalMessageInfo + +func (m *ImageReviewSpec) Reset() { *m = ImageReviewSpec{} } +func (*ImageReviewSpec) ProtoMessage() {} +func (*ImageReviewSpec) Descriptor() ([]byte, []int) { + return fileDescriptor_834793af728657a5, []int{2} +} +func (m *ImageReviewSpec) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ImageReviewSpec) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *ImageReviewSpec) XXX_Merge(src proto.Message) { + xxx_messageInfo_ImageReviewSpec.Merge(m, src) +} +func (m *ImageReviewSpec) XXX_Size() int { + return m.Size() +} +func (m *ImageReviewSpec) XXX_DiscardUnknown() { + xxx_messageInfo_ImageReviewSpec.DiscardUnknown(m) +} + +var xxx_messageInfo_ImageReviewSpec proto.InternalMessageInfo + +func (m *ImageReviewStatus) Reset() { *m = ImageReviewStatus{} } +func (*ImageReviewStatus) ProtoMessage() {} +func (*ImageReviewStatus) Descriptor() ([]byte, []int) { + return fileDescriptor_834793af728657a5, []int{3} +} +func (m *ImageReviewStatus) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ImageReviewStatus) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *ImageReviewStatus) XXX_Merge(src proto.Message) { + xxx_messageInfo_ImageReviewStatus.Merge(m, src) +} +func (m *ImageReviewStatus) XXX_Size() int { + return m.Size() +} +func (m *ImageReviewStatus) XXX_DiscardUnknown() { + xxx_messageInfo_ImageReviewStatus.DiscardUnknown(m) +} + +var xxx_messageInfo_ImageReviewStatus 
proto.InternalMessageInfo + +func init() { + proto.RegisterType((*ImageReview)(nil), "k8s.io.api.imagepolicy.v1alpha1.ImageReview") + proto.RegisterType((*ImageReviewContainerSpec)(nil), "k8s.io.api.imagepolicy.v1alpha1.ImageReviewContainerSpec") + proto.RegisterType((*ImageReviewSpec)(nil), "k8s.io.api.imagepolicy.v1alpha1.ImageReviewSpec") + proto.RegisterMapType((map[string]string)(nil), "k8s.io.api.imagepolicy.v1alpha1.ImageReviewSpec.AnnotationsEntry") + proto.RegisterType((*ImageReviewStatus)(nil), "k8s.io.api.imagepolicy.v1alpha1.ImageReviewStatus") + proto.RegisterMapType((map[string]string)(nil), "k8s.io.api.imagepolicy.v1alpha1.ImageReviewStatus.AuditAnnotationsEntry") +} + +func init() { + proto.RegisterFile("k8s.io/kubernetes/vendor/k8s.io/api/imagepolicy/v1alpha1/generated.proto", fileDescriptor_834793af728657a5) +} + +var fileDescriptor_834793af728657a5 = []byte{ + // 607 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x94, 0x92, 0xcf, 0x6e, 0xd3, 0x4c, + 0x14, 0xc5, 0xe3, 0xa4, 0xff, 0x32, 0xf9, 0x3e, 0x9a, 0x0e, 0x20, 0x59, 0x59, 0xb8, 0x55, 0x90, + 0x50, 0x59, 0x30, 0x43, 0x2b, 0x84, 0x0a, 0x0b, 0x50, 0x5c, 0x21, 0x95, 0x05, 0x20, 0x0d, 0xbb, + 0xae, 0x98, 0x38, 0x17, 0xc7, 0x24, 0x9e, 0xb1, 0x3c, 0xe3, 0x94, 0xec, 0x78, 0x02, 0xc4, 0x1b, + 0xf0, 0x22, 0x3c, 0x40, 0x97, 0x5d, 0x76, 0x55, 0x51, 0xb3, 0xe4, 0x25, 0x90, 0xc7, 0x4e, 0x6c, + 0x92, 0x22, 0x94, 0x9d, 0xef, 0xbd, 0x73, 0x7e, 0xf7, 0xcc, 0xf1, 0xa0, 0x93, 0xd1, 0x91, 0x22, + 0x81, 0xa4, 0xa3, 0xa4, 0x0f, 0xb1, 0x00, 0x0d, 0x8a, 0x4e, 0x40, 0x0c, 0x64, 0x4c, 0x8b, 0x01, + 0x8f, 0x02, 0x1a, 0x84, 0xdc, 0x87, 0x48, 0x8e, 0x03, 0x6f, 0x4a, 0x27, 0x07, 0x7c, 0x1c, 0x0d, + 0xf9, 0x01, 0xf5, 0x41, 0x40, 0xcc, 0x35, 0x0c, 0x48, 0x14, 0x4b, 0x2d, 0xf1, 0x6e, 0x2e, 0x20, + 0x3c, 0x0a, 0x48, 0x45, 0x40, 0x66, 0x82, 0xce, 0x43, 0x3f, 0xd0, 0xc3, 0xa4, 0x4f, 0x3c, 0x19, + 0x52, 0x5f, 0xfa, 0x92, 0x1a, 0x5d, 0x3f, 0xf9, 0x60, 0x2a, 0x53, 0x98, 0xaf, 0x9c, 0xd7, 0x79, + 0x5c, 0x1a, 0x08, 0xb9, 0x37, 0x0c, 0x04, 0xc4, 0x53, 0x1a, 0x8d, 0xfc, 0xac, 0xa1, 0x68, 0x08, + 0x9a, 0xd3, 0xc9, 0x92, 0x8b, 0x0e, 0xfd, 0x9b, 0x2a, 0x4e, 0x84, 0x0e, 0x42, 0x58, 0x12, 0x3c, + 0xf9, 0x97, 0x40, 0x79, 0x43, 0x08, 0xf9, 0xa2, 0xae, 0xfb, 0xad, 0x8e, 0x5a, 0xaf, 0xb2, 0x6b, + 0x32, 0x98, 0x04, 0x70, 0x86, 0xdf, 0xa3, 0xad, 0xcc, 0xd3, 0x80, 0x6b, 0x6e, 0x5b, 0x7b, 0xd6, + 0x7e, 0xeb, 0xf0, 0x11, 0x29, 0x13, 0x99, 0xa3, 0x49, 0x34, 0xf2, 0xb3, 0x86, 0x22, 0xd9, 0x69, + 0x32, 0x39, 0x20, 0x6f, 0xfb, 0x1f, 0xc1, 0xd3, 0xaf, 0x41, 0x73, 0x17, 0x9f, 0x5f, 0xed, 0xd6, + 0xd2, 0xab, 0x5d, 0x54, 0xf6, 0xd8, 0x9c, 0x8a, 0x19, 0x5a, 0x53, 0x11, 0x78, 0x76, 0x7d, 0x89, + 0x7e, 0x63, 0xde, 0xa4, 0xe2, 0xee, 0x5d, 0x04, 0x9e, 0xfb, 0x5f, 0x41, 0x5f, 0xcb, 0x2a, 0x66, + 0x58, 0xf8, 0x14, 0x6d, 0x28, 0xcd, 0x75, 0xa2, 0xec, 0x86, 0xa1, 0x1e, 0xae, 0x44, 0x35, 0x4a, + 0xf7, 0x56, 0xc1, 0xdd, 0xc8, 0x6b, 0x56, 0x10, 0xbb, 0x2f, 0x90, 0x5d, 0x39, 0x7c, 0x2c, 0x85, + 0xe6, 0x59, 0x04, 0xd9, 0x76, 0x7c, 0x0f, 0xad, 0x1b, 0xba, 0x89, 0xaa, 0xe9, 0xfe, 0x5f, 0x20, + 0xd6, 0x73, 0x41, 0x3e, 0xeb, 0xfe, 0xaa, 0xa3, 0xed, 0x85, 0x4b, 0xe0, 0x10, 0x21, 0x6f, 0x46, + 0x52, 0xb6, 0xb5, 0xd7, 0xd8, 0x6f, 0x1d, 0x3e, 0x5d, 0xc5, 0xf4, 0x1f, 0x3e, 0xca, 0xc4, 0xe7, + 0x6d, 0xc5, 0x2a, 0x0b, 0xf0, 0x27, 0xd4, 0xe2, 0x42, 0x48, 0xcd, 0x75, 0x20, 0x85, 0xb2, 0xeb, + 0x66, 0x5f, 0x6f, 0xd5, 0xe8, 0x49, 0xaf, 0x64, 0xbc, 0x14, 0x3a, 0x9e, 0xba, 0xb7, 0x8b, 0xbd, + 0xad, 0xca, 0x84, 0x55, 0x57, 0x61, 0x8a, 0x9a, 0x82, 0x87, 0xa0, 
0x22, 0xee, 0x81, 0xf9, 0x39, + 0x4d, 0x77, 0xa7, 0x10, 0x35, 0xdf, 0xcc, 0x06, 0xac, 0x3c, 0xd3, 0x79, 0x8e, 0xda, 0x8b, 0x6b, + 0x70, 0x1b, 0x35, 0x46, 0x30, 0xcd, 0x43, 0x66, 0xd9, 0x27, 0xbe, 0x83, 0xd6, 0x27, 0x7c, 0x9c, + 0x80, 0x79, 0x45, 0x4d, 0x96, 0x17, 0xcf, 0xea, 0x47, 0x56, 0xf7, 0x7b, 0x1d, 0xed, 0x2c, 0xfd, + 0x5c, 0xfc, 0x00, 0x6d, 0xf2, 0xf1, 0x58, 0x9e, 0xc1, 0xc0, 0x50, 0xb6, 0xdc, 0xed, 0xc2, 0xc4, + 0x66, 0x2f, 0x6f, 0xb3, 0xd9, 0x1c, 0xdf, 0x47, 0x1b, 0x31, 0x70, 0x25, 0x45, 0xce, 0x2e, 0xdf, + 0x05, 0x33, 0x5d, 0x56, 0x4c, 0xf1, 0x17, 0x0b, 0xb5, 0x79, 0x32, 0x08, 0x74, 0xc5, 0xae, 0xdd, + 0x30, 0xc9, 0x9e, 0xac, 0xfe, 0xfc, 0x48, 0x6f, 0x01, 0x95, 0x07, 0x6c, 0x17, 0xcb, 0xdb, 0x8b, + 0x63, 0xb6, 0xb4, 0xbb, 0x73, 0x8c, 0xee, 0xde, 0x08, 0x59, 0x25, 0x3e, 0x97, 0x9c, 0x5f, 0x3b, + 0xb5, 0x8b, 0x6b, 0xa7, 0x76, 0x79, 0xed, 0xd4, 0x3e, 0xa7, 0x8e, 0x75, 0x9e, 0x3a, 0xd6, 0x45, + 0xea, 0x58, 0x97, 0xa9, 0x63, 0xfd, 0x48, 0x1d, 0xeb, 0xeb, 0x4f, 0xa7, 0x76, 0xba, 0x35, 0xbb, + 0xc8, 0xef, 0x00, 0x00, 0x00, 0xff, 0xff, 0x44, 0x16, 0x48, 0xa2, 0x79, 0x05, 0x00, 0x00, +} + +func (m *ImageReview) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ImageReview) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ImageReview) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + { + size, err := m.Status.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a + { + size, err := m.Spec.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + { + size, err := m.ObjectMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *ImageReviewContainerSpec) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ImageReviewContainerSpec) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ImageReviewContainerSpec) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + i -= len(m.Image) + copy(dAtA[i:], m.Image) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Image))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *ImageReviewSpec) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ImageReviewSpec) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ImageReviewSpec) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + i -= len(m.Namespace) + copy(dAtA[i:], m.Namespace) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Namespace))) + i-- + dAtA[i] = 0x1a + if len(m.Annotations) > 0 { + keysForAnnotations := make([]string, 0, len(m.Annotations)) 
+ for k := range m.Annotations { + keysForAnnotations = append(keysForAnnotations, string(k)) + } + github_com_gogo_protobuf_sortkeys.Strings(keysForAnnotations) + for iNdEx := len(keysForAnnotations) - 1; iNdEx >= 0; iNdEx-- { + v := m.Annotations[string(keysForAnnotations[iNdEx])] + baseI := i + i -= len(v) + copy(dAtA[i:], v) + i = encodeVarintGenerated(dAtA, i, uint64(len(v))) + i-- + dAtA[i] = 0x12 + i -= len(keysForAnnotations[iNdEx]) + copy(dAtA[i:], keysForAnnotations[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(keysForAnnotations[iNdEx]))) + i-- + dAtA[i] = 0xa + i = encodeVarintGenerated(dAtA, i, uint64(baseI-i)) + i-- + dAtA[i] = 0x12 + } + } + if len(m.Containers) > 0 { + for iNdEx := len(m.Containers) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Containers[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + } + } + return len(dAtA) - i, nil +} + +func (m *ImageReviewStatus) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ImageReviewStatus) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ImageReviewStatus) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.AuditAnnotations) > 0 { + keysForAuditAnnotations := make([]string, 0, len(m.AuditAnnotations)) + for k := range m.AuditAnnotations { + keysForAuditAnnotations = append(keysForAuditAnnotations, string(k)) + } + github_com_gogo_protobuf_sortkeys.Strings(keysForAuditAnnotations) + for iNdEx := len(keysForAuditAnnotations) - 1; iNdEx >= 0; iNdEx-- { + v := m.AuditAnnotations[string(keysForAuditAnnotations[iNdEx])] + baseI := i + i -= len(v) + copy(dAtA[i:], v) + i = encodeVarintGenerated(dAtA, i, uint64(len(v))) + i-- + dAtA[i] = 0x12 + i -= len(keysForAuditAnnotations[iNdEx]) + copy(dAtA[i:], keysForAuditAnnotations[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(keysForAuditAnnotations[iNdEx]))) + i-- + dAtA[i] = 0xa + i = encodeVarintGenerated(dAtA, i, uint64(baseI-i)) + i-- + dAtA[i] = 0x1a + } + } + i -= len(m.Reason) + copy(dAtA[i:], m.Reason) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Reason))) + i-- + dAtA[i] = 0x12 + i-- + if m.Allowed { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x8 + return len(dAtA) - i, nil +} + +func encodeVarintGenerated(dAtA []byte, offset int, v uint64) int { + offset -= sovGenerated(v) + base := offset + for v >= 1<<7 { + dAtA[offset] = uint8(v&0x7f | 0x80) + v >>= 7 + offset++ + } + dAtA[offset] = uint8(v) + return base +} +func (m *ImageReview) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.ObjectMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = m.Spec.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = m.Status.Size() + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *ImageReviewContainerSpec) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Image) + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *ImageReviewSpec) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.Containers) > 0 { + for _, e := range m.Containers { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + if len(m.Annotations) > 0 { + for k, v := range 
m.Annotations { + _ = k + _ = v + mapEntrySize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + len(v) + sovGenerated(uint64(len(v))) + n += mapEntrySize + 1 + sovGenerated(uint64(mapEntrySize)) + } + } + l = len(m.Namespace) + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *ImageReviewStatus) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + n += 2 + l = len(m.Reason) + n += 1 + l + sovGenerated(uint64(l)) + if len(m.AuditAnnotations) > 0 { + for k, v := range m.AuditAnnotations { + _ = k + _ = v + mapEntrySize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + len(v) + sovGenerated(uint64(len(v))) + n += mapEntrySize + 1 + sovGenerated(uint64(mapEntrySize)) + } + } + return n +} + +func sovGenerated(x uint64) (n int) { + return (math_bits.Len64(x|1) + 6) / 7 +} +func sozGenerated(x uint64) (n int) { + return sovGenerated(uint64((x << 1) ^ uint64((int64(x) >> 63)))) +} +func (this *ImageReview) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&ImageReview{`, + `ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v1.ObjectMeta", 1), `&`, ``, 1) + `,`, + `Spec:` + strings.Replace(strings.Replace(this.Spec.String(), "ImageReviewSpec", "ImageReviewSpec", 1), `&`, ``, 1) + `,`, + `Status:` + strings.Replace(strings.Replace(this.Status.String(), "ImageReviewStatus", "ImageReviewStatus", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *ImageReviewContainerSpec) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&ImageReviewContainerSpec{`, + `Image:` + fmt.Sprintf("%v", this.Image) + `,`, + `}`, + }, "") + return s +} +func (this *ImageReviewSpec) String() string { + if this == nil { + return "nil" + } + repeatedStringForContainers := "[]ImageReviewContainerSpec{" + for _, f := range this.Containers { + repeatedStringForContainers += strings.Replace(strings.Replace(f.String(), "ImageReviewContainerSpec", "ImageReviewContainerSpec", 1), `&`, ``, 1) + "," + } + repeatedStringForContainers += "}" + keysForAnnotations := make([]string, 0, len(this.Annotations)) + for k := range this.Annotations { + keysForAnnotations = append(keysForAnnotations, k) + } + github_com_gogo_protobuf_sortkeys.Strings(keysForAnnotations) + mapStringForAnnotations := "map[string]string{" + for _, k := range keysForAnnotations { + mapStringForAnnotations += fmt.Sprintf("%v: %v,", k, this.Annotations[k]) + } + mapStringForAnnotations += "}" + s := strings.Join([]string{`&ImageReviewSpec{`, + `Containers:` + repeatedStringForContainers + `,`, + `Annotations:` + mapStringForAnnotations + `,`, + `Namespace:` + fmt.Sprintf("%v", this.Namespace) + `,`, + `}`, + }, "") + return s +} +func (this *ImageReviewStatus) String() string { + if this == nil { + return "nil" + } + keysForAuditAnnotations := make([]string, 0, len(this.AuditAnnotations)) + for k := range this.AuditAnnotations { + keysForAuditAnnotations = append(keysForAuditAnnotations, k) + } + github_com_gogo_protobuf_sortkeys.Strings(keysForAuditAnnotations) + mapStringForAuditAnnotations := "map[string]string{" + for _, k := range keysForAuditAnnotations { + mapStringForAuditAnnotations += fmt.Sprintf("%v: %v,", k, this.AuditAnnotations[k]) + } + mapStringForAuditAnnotations += "}" + s := strings.Join([]string{`&ImageReviewStatus{`, + `Allowed:` + fmt.Sprintf("%v", this.Allowed) + `,`, + `Reason:` + fmt.Sprintf("%v", this.Reason) + `,`, + `AuditAnnotations:` + mapStringForAuditAnnotations + `,`, + `}`, 
+ }, "") + return s +} +func valueToStringGenerated(v interface{}) string { + rv := reflect.ValueOf(v) + if rv.IsNil() { + return "nil" + } + pv := reflect.Indirect(rv).Interface() + return fmt.Sprintf("*%v", pv) +} +func (m *ImageReview) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ImageReview: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ImageReview: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ObjectMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Spec", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Spec.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Status", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Status.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ImageReviewContainerSpec) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated 
+ } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ImageReviewContainerSpec: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ImageReviewContainerSpec: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Image", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Image = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ImageReviewSpec) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ImageReviewSpec: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ImageReviewSpec: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Containers", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Containers = append(m.Containers, ImageReviewContainerSpec{}) + if err := m.Containers[len(m.Containers)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Annotations", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + 
return io.ErrUnexpectedEOF + } + if m.Annotations == nil { + m.Annotations = make(map[string]string) + } + var mapkey string + var mapvalue string + for iNdEx < postIndex { + entryPreIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + if fieldNum == 1 { + var stringLenmapkey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapkey |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapkey := int(stringLenmapkey) + if intStringLenmapkey < 0 { + return ErrInvalidLengthGenerated + } + postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey < 0 { + return ErrInvalidLengthGenerated + } + if postStringIndexmapkey > l { + return io.ErrUnexpectedEOF + } + mapkey = string(dAtA[iNdEx:postStringIndexmapkey]) + iNdEx = postStringIndexmapkey + } else if fieldNum == 2 { + var stringLenmapvalue uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapvalue |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapvalue := int(stringLenmapvalue) + if intStringLenmapvalue < 0 { + return ErrInvalidLengthGenerated + } + postStringIndexmapvalue := iNdEx + intStringLenmapvalue + if postStringIndexmapvalue < 0 { + return ErrInvalidLengthGenerated + } + if postStringIndexmapvalue > l { + return io.ErrUnexpectedEOF + } + mapvalue = string(dAtA[iNdEx:postStringIndexmapvalue]) + iNdEx = postStringIndexmapvalue + } else { + iNdEx = entryPreIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > postIndex { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + m.Annotations[mapkey] = mapvalue + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Namespace", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Namespace = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ImageReviewStatus) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + 
if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ImageReviewStatus: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ImageReviewStatus: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Allowed", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.Allowed = bool(v != 0) + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Reason", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Reason = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field AuditAnnotations", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.AuditAnnotations == nil { + m.AuditAnnotations = make(map[string]string) + } + var mapkey string + var mapvalue string + for iNdEx < postIndex { + entryPreIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + if fieldNum == 1 { + var stringLenmapkey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapkey |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapkey := int(stringLenmapkey) + if intStringLenmapkey < 0 { + return ErrInvalidLengthGenerated + } + postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey < 0 { + return ErrInvalidLengthGenerated + } + if postStringIndexmapkey > l { + return io.ErrUnexpectedEOF + } + mapkey = string(dAtA[iNdEx:postStringIndexmapkey]) + iNdEx = postStringIndexmapkey + } else if fieldNum == 2 { + var stringLenmapvalue uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapvalue |= uint64(b&0x7F) << shift + if 
b < 0x80 { + break + } + } + intStringLenmapvalue := int(stringLenmapvalue) + if intStringLenmapvalue < 0 { + return ErrInvalidLengthGenerated + } + postStringIndexmapvalue := iNdEx + intStringLenmapvalue + if postStringIndexmapvalue < 0 { + return ErrInvalidLengthGenerated + } + if postStringIndexmapvalue > l { + return io.ErrUnexpectedEOF + } + mapvalue = string(dAtA[iNdEx:postStringIndexmapvalue]) + iNdEx = postStringIndexmapvalue + } else { + iNdEx = entryPreIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > postIndex { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + m.AuditAnnotations[mapkey] = mapvalue + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func skipGenerated(dAtA []byte) (n int, err error) { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowGenerated + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + wireType := int(wire & 0x7) + switch wireType { + case 0: + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowGenerated + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + iNdEx++ + if dAtA[iNdEx-1] < 0x80 { + break + } + } + return iNdEx, nil + case 1: + iNdEx += 8 + return iNdEx, nil + case 2: + var length int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowGenerated + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + length |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if length < 0 { + return 0, ErrInvalidLengthGenerated + } + iNdEx += length + if iNdEx < 0 { + return 0, ErrInvalidLengthGenerated + } + return iNdEx, nil + case 3: + for { + var innerWire uint64 + var start int = iNdEx + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowGenerated + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + innerWire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + innerWireType := int(innerWire & 0x7) + if innerWireType == 4 { + break + } + next, err := skipGenerated(dAtA[start:]) + if err != nil { + return 0, err + } + iNdEx = start + next + if iNdEx < 0 { + return 0, ErrInvalidLengthGenerated + } + } + return iNdEx, nil + case 4: + return iNdEx, nil + case 5: + iNdEx += 4 + return iNdEx, nil + default: + return 0, fmt.Errorf("proto: illegal wireType %d", wireType) + } + } + panic("unreachable") +} + +var ( + ErrInvalidLengthGenerated = fmt.Errorf("proto: negative length found during unmarshaling") + ErrIntOverflowGenerated = fmt.Errorf("proto: integer overflow") +) diff --git a/vendor/k8s.io/api/imagepolicy/v1alpha1/generated.proto b/vendor/k8s.io/api/imagepolicy/v1alpha1/generated.proto new file mode 100644 index 0000000000000000000000000000000000000000..381d0091bdc2e3f3fefcaa02f5ab5928388e3519 --- /dev/null +++ b/vendor/k8s.io/api/imagepolicy/v1alpha1/generated.proto @@ -0,0 +1,86 @@ 
+/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + + +// This file was autogenerated by go-to-protobuf. Do not edit it manually! + +syntax = 'proto2'; + +package k8s.io.api.imagepolicy.v1alpha1; + +import "k8s.io/apimachinery/pkg/apis/meta/v1/generated.proto"; +import "k8s.io/apimachinery/pkg/runtime/generated.proto"; +import "k8s.io/apimachinery/pkg/runtime/schema/generated.proto"; + +// Package-wide variables from generator "generated". +option go_package = "v1alpha1"; + +// ImageReview checks if the set of images in a pod are allowed. +message ImageReview { + // +optional + optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; + + // Spec holds information about the pod being evaluated + optional ImageReviewSpec spec = 2; + + // Status is filled in by the backend and indicates whether the pod should be allowed. + // +optional + optional ImageReviewStatus status = 3; +} + +// ImageReviewContainerSpec is a description of a container within the pod creation request. +message ImageReviewContainerSpec { + // This can be in the form image:tag or image@SHA:012345679abcdef. + // +optional + optional string image = 1; +} + +// ImageReviewSpec is a description of the pod creation request. +message ImageReviewSpec { + // Containers is a list of a subset of the information in each container of the Pod being created. + // +optional + repeated ImageReviewContainerSpec containers = 1; + + // Annotations is a list of key-value pairs extracted from the Pod's annotations. + // It only includes keys which match the pattern `*.image-policy.k8s.io/*`. + // It is up to each webhook backend to determine how to interpret these annotations, if at all. + // +optional + map<string, string> annotations = 2; + + // Namespace is the namespace the pod is being created in. + // +optional + optional string namespace = 3; +} + +// ImageReviewStatus is the result of the review for the pod creation request. +message ImageReviewStatus { + // Allowed indicates that all images were allowed to be run. + optional bool allowed = 1; + + // Reason should be empty unless Allowed is false in which case it + // may contain a short description of what is wrong. Kubernetes + // may truncate excessively long errors when displaying to the user. + // +optional + optional string reason = 2; + + // AuditAnnotations will be added to the attributes object of the + // admission controller request using 'AddAnnotation'. The keys should + // be prefix-less (i.e., the admission controller will add an + // appropriate prefix). + // +optional + map<string, string> auditAnnotations = 3; +} + diff --git a/vendor/k8s.io/api/imagepolicy/v1alpha1/register.go b/vendor/k8s.io/api/imagepolicy/v1alpha1/register.go new file mode 100644 index 0000000000000000000000000000000000000000..477571bbb270327ebfb42900a66bbcac596c6e0f --- /dev/null +++ b/vendor/k8s.io/api/imagepolicy/v1alpha1/register.go @@ -0,0 +1,51 @@ +/* +Copyright 2016 The Kubernetes Authors. 
+ +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1alpha1 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" +) + +// GroupName is the group name for this API. +const GroupName = "imagepolicy.k8s.io" + +// SchemeGroupVersion is group version used to register these objects +var SchemeGroupVersion = schema.GroupVersion{Group: GroupName, Version: "v1alpha1"} + +// Resource takes an unqualified resource and returns a Group qualified GroupResource +func Resource(resource string) schema.GroupResource { + return SchemeGroupVersion.WithResource(resource).GroupResource() +} + +var ( + // TODO: move SchemeBuilder with zz_generated.deepcopy.go to k8s.io/api. + // localSchemeBuilder and AddToScheme will stay in k8s.io/kubernetes. + SchemeBuilder = runtime.NewSchemeBuilder(addKnownTypes) + localSchemeBuilder = &SchemeBuilder + AddToScheme = localSchemeBuilder.AddToScheme +) + +// Adds the list of known types to the given scheme. +func addKnownTypes(scheme *runtime.Scheme) error { + scheme.AddKnownTypes(SchemeGroupVersion, + &ImageReview{}, + ) + metav1.AddToGroupVersion(scheme, SchemeGroupVersion) + return nil +} diff --git a/vendor/k8s.io/api/imagepolicy/v1alpha1/types.go b/vendor/k8s.io/api/imagepolicy/v1alpha1/types.go new file mode 100644 index 0000000000000000000000000000000000000000..fd689e638d98aefec552e257ddb2d83f4c7794e7 --- /dev/null +++ b/vendor/k8s.io/api/imagepolicy/v1alpha1/types.go @@ -0,0 +1,80 @@ +/* +Copyright 2016 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1alpha1 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +// +genclient +// +genclient:nonNamespaced +// +genclient:noVerbs +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// ImageReview checks if the set of images in a pod are allowed. +type ImageReview struct { + metav1.TypeMeta `json:",inline"` + // +optional + metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` + + // Spec holds information about the pod being evaluated + Spec ImageReviewSpec `json:"spec" protobuf:"bytes,2,opt,name=spec"` + + // Status is filled in by the backend and indicates whether the pod should be allowed. + // +optional + Status ImageReviewStatus `json:"status,omitempty" protobuf:"bytes,3,opt,name=status"` +} + +// ImageReviewSpec is a description of the pod creation request. 
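+// +// An illustrative, non-normative sketch of a spec as a webhook backend might see it serialized to JSON; every value below is hypothetical: +// +//	{ +//	  "containers": [{"image": "registry.example.com/app:v1.2.3"}], +//	  "annotations": {"alpha.image-policy.k8s.io/break-glass": "true"}, +//	  "namespace": "default" +//	}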
+type ImageReviewSpec struct { + // Containers is a list of a subset of the information in each container of the Pod being created. + // +optional + Containers []ImageReviewContainerSpec `json:"containers,omitempty" protobuf:"bytes,1,rep,name=containers"` + // Annotations is a list of key-value pairs extracted from the Pod's annotations. + // It only includes keys which match the pattern `*.image-policy.k8s.io/*`. + // It is up to each webhook backend to determine how to interpret these annotations, if at all. + // +optional + Annotations map[string]string `json:"annotations,omitempty" protobuf:"bytes,2,rep,name=annotations"` + // Namespace is the namespace the pod is being created in. + // +optional + Namespace string `json:"namespace,omitempty" protobuf:"bytes,3,opt,name=namespace"` +} + +// ImageReviewContainerSpec is a description of a container within the pod creation request. +type ImageReviewContainerSpec struct { + // This can be in the form image:tag or image@SHA:012345679abcdef. + // +optional + Image string `json:"image,omitempty" protobuf:"bytes,1,opt,name=image"` + // In future, we may add command line overrides, exec health check command lines, and so on. +} + +// ImageReviewStatus is the result of the review for the pod creation request. +type ImageReviewStatus struct { + // Allowed indicates that all images were allowed to be run. + Allowed bool `json:"allowed" protobuf:"varint,1,opt,name=allowed"` + // Reason should be empty unless Allowed is false in which case it + // may contain a short description of what is wrong. Kubernetes + // may truncate excessively long errors when displaying to the user. + // +optional + Reason string `json:"reason,omitempty" protobuf:"bytes,2,opt,name=reason"` + // AuditAnnotations will be added to the attributes object of the + // admission controller request using 'AddAnnotation'. The keys should + // be prefix-less (i.e., the admission controller will add an + // appropriate prefix). + // +optional + AuditAnnotations map[string]string `json:"auditAnnotations,omitempty" protobuf:"bytes,3,rep,name=auditAnnotations"` +} diff --git a/vendor/k8s.io/api/imagepolicy/v1alpha1/types_swagger_doc_generated.go b/vendor/k8s.io/api/imagepolicy/v1alpha1/types_swagger_doc_generated.go new file mode 100644 index 0000000000000000000000000000000000000000..0211d94af010740e59606269c93cd412b82a2c17 --- /dev/null +++ b/vendor/k8s.io/api/imagepolicy/v1alpha1/types_swagger_doc_generated.go @@ -0,0 +1,71 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1alpha1 + +// This file contains a collection of methods that can be used from go-restful to +// generate Swagger API documentation for its models. Please read this PR for more +// information on the implementation: https://github.com/emicklei/go-restful/pull/215 +// +// TODOs are ignored from the parser (e.g. TODO(andronat):... || TODO:...) if and only if +// they are on one line! For multiple line or blocks that you want to ignore use ---. +// Any context after a --- is ignored. 
+// +// Those methods can be generated by using hack/update-generated-swagger-docs.sh + +// AUTO-GENERATED FUNCTIONS START HERE. DO NOT EDIT. +var map_ImageReview = map[string]string{ + "": "ImageReview checks if the set of images in a pod are allowed.", + "spec": "Spec holds information about the pod being evaluated", + "status": "Status is filled in by the backend and indicates whether the pod should be allowed.", +} + +func (ImageReview) SwaggerDoc() map[string]string { + return map_ImageReview +} + +var map_ImageReviewContainerSpec = map[string]string{ + "": "ImageReviewContainerSpec is a description of a container within the pod creation request.", + "image": "This can be in the form image:tag or image@SHA:012345679abcdef.", +} + +func (ImageReviewContainerSpec) SwaggerDoc() map[string]string { + return map_ImageReviewContainerSpec +} + +var map_ImageReviewSpec = map[string]string{ + "": "ImageReviewSpec is a description of the pod creation request.", + "containers": "Containers is a list of a subset of the information in each container of the Pod being created.", + "annotations": "Annotations is a list of key-value pairs extracted from the Pod's annotations. It only includes keys which match the pattern `*.image-policy.k8s.io/*`. It is up to each webhook backend to determine how to interpret these annotations, if at all.", + "namespace": "Namespace is the namespace the pod is being created in.", +} + +func (ImageReviewSpec) SwaggerDoc() map[string]string { + return map_ImageReviewSpec +} + +var map_ImageReviewStatus = map[string]string{ + "": "ImageReviewStatus is the result of the review for the pod creation request.", + "allowed": "Allowed indicates that all images were allowed to be run.", + "reason": "Reason should be empty unless Allowed is false in which case it may contain a short description of what is wrong. Kubernetes may truncate excessively long errors when displaying to the user.", + "auditAnnotations": "AuditAnnotations will be added to the attributes object of the admission controller request using 'AddAnnotation'. The keys should be prefix-less (i.e., the admission controller will add an appropriate prefix).", +} + +func (ImageReviewStatus) SwaggerDoc() map[string]string { + return map_ImageReviewStatus +} + +// AUTO-GENERATED FUNCTIONS END HERE diff --git a/vendor/k8s.io/api/imagepolicy/v1alpha1/zz_generated.deepcopy.go b/vendor/k8s.io/api/imagepolicy/v1alpha1/zz_generated.deepcopy.go new file mode 100644 index 0000000000000000000000000000000000000000..83d47b7919d4c0657c69cd259185fbac0df1c313 --- /dev/null +++ b/vendor/k8s.io/api/imagepolicy/v1alpha1/zz_generated.deepcopy.go @@ -0,0 +1,120 @@ +// +build !ignore_autogenerated + +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by deepcopy-gen. DO NOT EDIT. + +package v1alpha1 + +import ( + runtime "k8s.io/apimachinery/pkg/runtime" +) + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
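+// +// Note: TypeMeta carries only scalar fields and is copied by plain assignment below, while ObjectMeta, Spec, and Status own maps and slices and are therefore copied recursively via their own DeepCopyInto methods.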
+func (in *ImageReview) DeepCopyInto(out *ImageReview) {
+	*out = *in
+	out.TypeMeta = in.TypeMeta
+	in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
+	in.Spec.DeepCopyInto(&out.Spec)
+	in.Status.DeepCopyInto(&out.Status)
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ImageReview.
+func (in *ImageReview) DeepCopy() *ImageReview {
+	if in == nil {
+		return nil
+	}
+	out := new(ImageReview)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
+func (in *ImageReview) DeepCopyObject() runtime.Object {
+	if c := in.DeepCopy(); c != nil {
+		return c
+	}
+	return nil
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *ImageReviewContainerSpec) DeepCopyInto(out *ImageReviewContainerSpec) {
+	*out = *in
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ImageReviewContainerSpec.
+func (in *ImageReviewContainerSpec) DeepCopy() *ImageReviewContainerSpec {
+	if in == nil {
+		return nil
+	}
+	out := new(ImageReviewContainerSpec)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *ImageReviewSpec) DeepCopyInto(out *ImageReviewSpec) {
+	*out = *in
+	if in.Containers != nil {
+		in, out := &in.Containers, &out.Containers
+		*out = make([]ImageReviewContainerSpec, len(*in))
+		copy(*out, *in)
+	}
+	if in.Annotations != nil {
+		in, out := &in.Annotations, &out.Annotations
+		*out = make(map[string]string, len(*in))
+		for key, val := range *in {
+			(*out)[key] = val
+		}
+	}
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ImageReviewSpec.
+func (in *ImageReviewSpec) DeepCopy() *ImageReviewSpec {
+	if in == nil {
+		return nil
+	}
+	out := new(ImageReviewSpec)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *ImageReviewStatus) DeepCopyInto(out *ImageReviewStatus) {
+	*out = *in
+	if in.AuditAnnotations != nil {
+		in, out := &in.AuditAnnotations, &out.AuditAnnotations
+		*out = make(map[string]string, len(*in))
+		for key, val := range *in {
+			(*out)[key] = val
+		}
+	}
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ImageReviewStatus.
+func (in *ImageReviewStatus) DeepCopy() *ImageReviewStatus {
+	if in == nil {
+		return nil
+	}
+	out := new(ImageReviewStatus)
+	in.DeepCopyInto(out)
+	return out
+}
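
The generated functions above allocate fresh slices and maps rather than copying references, so mutating a DeepCopy result never aliases the original object — the property that makes it safe to hand shared, cached objects to callers. A small sketch of that guarantee (the annotation key and value are illustrative only):

package main

import (
	"fmt"

	"k8s.io/api/imagepolicy/v1alpha1"
)

func main() {
	orig := &v1alpha1.ImageReviewStatus{
		Allowed:          true,
		AuditAnnotations: map[string]string{"scanner/verdict": "clean"},
	}
	cp := orig.DeepCopy()
	cp.AuditAnnotations["scanner/verdict"] = "tampered" // writes only to the copied map

	fmt.Println(orig.AuditAnnotations["scanner/verdict"]) // still "clean"
}
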
diff --git a/vendor/k8s.io/apiextensions-apiserver/pkg/client/listers/apiextensions/v1beta1/customresourcedefinition.go b/vendor/k8s.io/apiextensions-apiserver/pkg/client/listers/apiextensions/v1beta1/customresourcedefinition.go
deleted file mode 100644
index 87633217b085357c345bfb0a852e1166d39190a6..0000000000000000000000000000000000000000
--- a/vendor/k8s.io/apiextensions-apiserver/pkg/client/listers/apiextensions/v1beta1/customresourcedefinition.go
+++ /dev/null
@@ -1,65 +0,0 @@
-/*
-Copyright The Kubernetes Authors.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-*/
-
-// Code generated by lister-gen. DO NOT EDIT.
-
-package v1beta1
-
-import (
-	v1beta1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1"
-	"k8s.io/apimachinery/pkg/api/errors"
-	"k8s.io/apimachinery/pkg/labels"
-	"k8s.io/client-go/tools/cache"
-)
-
-// CustomResourceDefinitionLister helps list CustomResourceDefinitions.
-type CustomResourceDefinitionLister interface {
-	// List lists all CustomResourceDefinitions in the indexer.
-	List(selector labels.Selector) (ret []*v1beta1.CustomResourceDefinition, err error)
-	// Get retrieves the CustomResourceDefinition from the index for a given name.
-	Get(name string) (*v1beta1.CustomResourceDefinition, error)
-	CustomResourceDefinitionListerExpansion
-}
-
-// customResourceDefinitionLister implements the CustomResourceDefinitionLister interface.
-type customResourceDefinitionLister struct {
-	indexer cache.Indexer
-}
-
-// NewCustomResourceDefinitionLister returns a new CustomResourceDefinitionLister.
-func NewCustomResourceDefinitionLister(indexer cache.Indexer) CustomResourceDefinitionLister {
-	return &customResourceDefinitionLister{indexer: indexer}
-}
-
-// List lists all CustomResourceDefinitions in the indexer.
-func (s *customResourceDefinitionLister) List(selector labels.Selector) (ret []*v1beta1.CustomResourceDefinition, err error) {
-	err = cache.ListAll(s.indexer, selector, func(m interface{}) {
-		ret = append(ret, m.(*v1beta1.CustomResourceDefinition))
-	})
-	return ret, err
-}
-
-// Get retrieves the CustomResourceDefinition from the index for a given name.
-func (s *customResourceDefinitionLister) Get(name string) (*v1beta1.CustomResourceDefinition, error) {
-	obj, exists, err := s.indexer.GetByKey(name)
-	if err != nil {
-		return nil, err
-	}
-	if !exists {
-		return nil, errors.NewNotFound(v1beta1.Resource("customresourcedefinition"), name)
-	}
-	return obj.(*v1beta1.CustomResourceDefinition), nil
-}
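
With this v1beta1 CustomResourceDefinition lister dropped from the vendor tree, cache-backed CRD lookups through this package are no longer available; the typed clientset, still vendored per the modules.txt change below, remains the way to read CRDs. A hedged sketch of the direct-API equivalent of the deleted Get, assuming the pre-1.18 client signatures vendored here (`getCRD` and its package name are illustrative, not part of this change):

package crdlookup

import (
	v1beta1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1"
	clientset "k8s.io/apiextensions-apiserver/pkg/client/clientset/clientset"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

// getCRD fetches a CustomResourceDefinition straight from the API server,
// replacing the indexer-backed Get the deleted lister provided. The clientset
// vendored here predates context-aware signatures (added in Kubernetes 1.18),
// so Get takes only a name and GetOptions.
func getCRD(c clientset.Interface, name string) (*v1beta1.CustomResourceDefinition, error) {
	return c.ApiextensionsV1beta1().CustomResourceDefinitions().Get(name, metav1.GetOptions{})
}

The trade-off is an API round-trip per lookup instead of an informer cache hit, which is acceptable for infrequent reads.
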
diff --git a/vendor/modules.txt b/vendor/modules.txt
index dfc1c6342f830f9a1b53563a85812e6ed46996b0..18f03f7baf5e9ff75bdac2de060c208c73eaa4b3 100644
--- a/vendor/modules.txt
+++ b/vendor/modules.txt
@@ -10,6 +10,8 @@ github.com/davecgh/go-spew/spew
 github.com/evanphx/json-patch
 # github.com/fatih/color v1.7.0
 github.com/fatih/color
+# github.com/ghodss/yaml v1.0.0
+github.com/ghodss/yaml
 # github.com/go-logr/logr v0.1.0
 github.com/go-logr/logr
 # github.com/gobuffalo/flect v0.1.5
@@ -33,7 +35,7 @@ github.com/google/go-cmp/cmp/internal/diff
 github.com/google/go-cmp/cmp/internal/flags
 github.com/google/go-cmp/cmp/internal/function
 github.com/google/go-cmp/cmp/internal/value
-# github.com/google/gofuzz v1.0.0
+# github.com/google/gofuzz v1.1.0
 github.com/google/gofuzz
 # github.com/google/uuid v1.1.1
 github.com/google/uuid
@@ -100,10 +102,54 @@ github.com/onsi/gomega/matchers/support/goraph/edge
 github.com/onsi/gomega/matchers/support/goraph/node
 github.com/onsi/gomega/matchers/support/goraph/util
 github.com/onsi/gomega/types
-# github.com/openshift/api v0.0.0-20200127192224-ffde1bfabb9f
+# github.com/openshift/api v0.0.0-20200210091934-a0e53e94816b => github.com/openshift/api v0.0.0-20200618202633-7192180f496a
+github.com/openshift/api
+github.com/openshift/api/apps
+github.com/openshift/api/apps/v1
+github.com/openshift/api/authorization
+github.com/openshift/api/authorization/v1
+github.com/openshift/api/build
+github.com/openshift/api/build/v1
+github.com/openshift/api/config
 github.com/openshift/api/config/v1
+github.com/openshift/api/image
+github.com/openshift/api/image/docker10
+github.com/openshift/api/image/dockerpre012
+github.com/openshift/api/image/v1
+github.com/openshift/api/imageregistry
+github.com/openshift/api/imageregistry/v1
+github.com/openshift/api/kubecontrolplane
+github.com/openshift/api/kubecontrolplane/v1
+github.com/openshift/api/legacyconfig/v1
+github.com/openshift/api/network
+github.com/openshift/api/network/v1
+github.com/openshift/api/oauth
+github.com/openshift/api/oauth/v1
+github.com/openshift/api/openshiftcontrolplane
+github.com/openshift/api/openshiftcontrolplane/v1
+github.com/openshift/api/operator
+github.com/openshift/api/operator/v1
+github.com/openshift/api/operator/v1alpha1
+github.com/openshift/api/osin
+github.com/openshift/api/osin/v1
+github.com/openshift/api/pkg/serialization
+github.com/openshift/api/project
+github.com/openshift/api/project/v1
+github.com/openshift/api/quota
+github.com/openshift/api/quota/v1
+github.com/openshift/api/route
+github.com/openshift/api/route/v1
+github.com/openshift/api/samples
+github.com/openshift/api/samples/v1
+github.com/openshift/api/security
 github.com/openshift/api/security/v1
-# github.com/openshift/client-go v0.0.0-20190617165122-8892c0adc000 => github.com/openshift/client-go v0.0.0-20190923180330-3b6373338c9b
+github.com/openshift/api/servicecertsigner
+github.com/openshift/api/servicecertsigner/v1alpha1
+github.com/openshift/api/template
+github.com/openshift/api/template/v1
+github.com/openshift/api/user
+github.com/openshift/api/user/v1
+# github.com/openshift/client-go v0.0.0-20200116152001-92a2713fa240 => github.com/openshift/client-go v0.0.0-20200116152001-92a2713fa240
 github.com/openshift/client-go/config/clientset/versioned
 github.com/openshift/client-go/config/clientset/versioned/fake
 github.com/openshift/client-go/config/clientset/versioned/scheme
@@ -114,18 +160,20 @@ github.com/openshift/client-go/config/informers/externalversions/config
 github.com/openshift/client-go/config/informers/externalversions/config/v1
 github.com/openshift/client-go/config/informers/externalversions/internalinterfaces
 github.com/openshift/client-go/config/listers/config/v1
-github.com/openshift/client-go/security/clientset/versioned/scheme
-github.com/openshift/client-go/security/clientset/versioned/typed/security/v1
-# github.com/openshift/cluster-version-operator v3.11.1-0.20190629164025-08cac1c02538+incompatible
-github.com/openshift/cluster-version-operator/lib/resourceapply
-github.com/openshift/cluster-version-operator/lib/resourcemerge
+# github.com/openshift/library-go v0.0.0-00010101000000-000000000000 => github.com/openshift/library-go v0.0.0-20200921144613-67f7770bf823
+github.com/openshift/library-go/pkg/config/clusteroperator/v1helpers
+github.com/openshift/library-go/pkg/operator/events
+github.com/openshift/library-go/pkg/operator/resource/resourceapply
+github.com/openshift/library-go/pkg/operator/resource/resourcehelper
+github.com/openshift/library-go/pkg/operator/resource/resourcemerge
+github.com/openshift/library-go/pkg/operator/v1helpers
 # github.com/operator-framework/operator-sdk v0.5.1-0.20190301204940-c2efe6f74e7b
 github.com/operator-framework/operator-sdk/version
 # github.com/pkg/errors v0.8.1
 github.com/pkg/errors
 # github.com/pmezard/go-difflib v1.0.0
 github.com/pmezard/go-difflib/difflib
-# github.com/prometheus/client_golang v1.0.0 => github.com/prometheus/client_golang v0.9.2
+# github.com/prometheus/client_golang v1.1.0 => github.com/prometheus/client_golang v0.9.2
 github.com/prometheus/client_golang/prometheus
 github.com/prometheus/client_golang/prometheus/internal
 github.com/prometheus/client_golang/prometheus/promhttp
@@ -172,7 +220,7 @@ github.com/vmware/govmomi/vim25/types
 github.com/vmware/govmomi/vim25/xml
 # golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550
 golang.org/x/crypto/ssh/terminal
-# golang.org/x/net v0.0.0-20191209160850-c0dbc17a3553
+# golang.org/x/net v0.0.0-20200202094626-16171245cfb2
 golang.org/x/net/context
 golang.org/x/net/context/ctxhttp
 golang.org/x/net/html
@@ -309,6 +357,7 @@ k8s.io/api/core/v1
 k8s.io/api/discovery/v1alpha1
 k8s.io/api/events/v1beta1
 k8s.io/api/extensions/v1beta1
+k8s.io/api/imagepolicy/v1alpha1
 k8s.io/api/networking/v1
 k8s.io/api/networking/v1beta1
 k8s.io/api/node/v1alpha1
@@ -324,7 +373,7 @@ k8s.io/api/settings/v1alpha1
 k8s.io/api/storage/v1
 k8s.io/api/storage/v1alpha1
 k8s.io/api/storage/v1beta1
-# k8s.io/apiextensions-apiserver v0.0.0-20190918201827-3de75813f604 => github.com/openshift/kubernetes-apiextensions-apiserver v0.0.0-20190918161926-8f644eb6e783
+# k8s.io/apiextensions-apiserver v0.17.1 => github.com/openshift/kubernetes-apiextensions-apiserver v0.0.0-20190918161926-8f644eb6e783
 k8s.io/apiextensions-apiserver/pkg/apis/apiextensions
 k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1
 k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1
@@ -332,7 +381,6 @@ k8s.io/apiextensions-apiserver/pkg/client/clientset/clientset
 k8s.io/apiextensions-apiserver/pkg/client/clientset/clientset/scheme
 k8s.io/apiextensions-apiserver/pkg/client/clientset/clientset/typed/apiextensions/v1
 k8s.io/apiextensions-apiserver/pkg/client/clientset/clientset/typed/apiextensions/v1beta1
-k8s.io/apiextensions-apiserver/pkg/client/listers/apiextensions/v1beta1
 # k8s.io/apimachinery v0.17.2 => github.com/openshift/kubernetes-apimachinery v0.0.0-20190913080033-27d36303b655
 k8s.io/apimachinery/pkg/api/equality
 k8s.io/apimachinery/pkg/api/errors
@@ -629,7 +677,7 @@ k8s.io/gengo/parser
 k8s.io/gengo/types
 # k8s.io/klog v1.0.0
 k8s.io/klog
-# k8s.io/kube-aggregator v0.0.0-20190404125450-f5e124c822d6 => github.com/openshift/kubernetes-kube-aggregator v0.0.0-20190918161219-8c8f079fddc3
+# k8s.io/kube-aggregator v0.17.1 => github.com/openshift/kubernetes-kube-aggregator v0.0.0-20190918161219-8c8f079fddc3
 k8s.io/kube-aggregator/pkg/apis/apiregistration
 k8s.io/kube-aggregator/pkg/apis/apiregistration/v1
 k8s.io/kube-aggregator/pkg/apis/apiregistration/v1beta1