diff --git a/Gopkg.lock b/Gopkg.lock index 10fd3044c41538a3e6eaed56945d1d87148b671a..129164c6035c15c8065c4e148e423aeca0bf0e03 100644 --- a/Gopkg.lock +++ b/Gopkg.lock @@ -132,6 +132,14 @@ revision = "1d29f06aebd59ccdf11ae04aa0334ded96e2d909" version = "v0.18.0" +[[projects]] + digest = "1:9059915429f7f3a5f18cfa6b7cab9a28721d7ac6db4079a62044aa229eb7f2a8" + name = "github.com/gobuffalo/envy" + packages = ["."] + pruneopts = "NUT" + revision = "fa0dfdc10b5366ce365b7d9d1755a03e4e797bc5" + version = "v1.6.15" + [[projects]] digest = "1:a1b2a5e38f79688ee8250942d5fa960525fceb1024c855c7bc76fa77b0f3cca2" name = "github.com/gogo/protobuf" @@ -261,6 +269,14 @@ revision = "76626ae9c91c4f2a10f34cad8ce83ea42c93bb75" version = "v1.0" +[[projects]] + digest = "1:da62aa6632d04e080b8a8b85a59ed9ed1550842a0099a55f3ae3a20d02a3745a" + name = "github.com/joho/godotenv" + packages = ["."] + pruneopts = "NUT" + revision = "23d116af351c84513e1946b527c88823e476be13" + version = "v1.3.0" + [[projects]] digest = "1:8e36686e8b139f8fe240c1d5cf3a145bc675c22ff8e707857cdd3ae17b00d728" name = "github.com/json-iterator/go" @@ -281,6 +297,14 @@ pruneopts = "NUT" revision = "6243d8e04c3f819e79757e8bc3faa15c3cb27003" +[[projects]] + digest = "1:56dbf15e091bf7926cb33a57cb6bdfc658fc6d3498d2f76f10a97ce7856f1fde" + name = "github.com/markbates/inflect" + packages = ["."] + pruneopts = "NUT" + revision = "24b83195037b3bc61fcda2d28b7b0518bce293b6" + version = "v1.0.4" + [[projects]] digest = "1:5985ef4caf91ece5d54817c11ea25f182697534f8ae6521eadcd628c142ac4b6" name = "github.com/matttproud/golang_protobuf_extensions" @@ -554,6 +578,29 @@ pruneopts = "NUT" revision = "bbced9601137e764853b2fad7ec3e2dc4c504e02" +[[projects]] + digest = "1:e09ada96a5a41deda4748b1659cc8953961799e798aea557257b56baee4ecaf3" + name = "github.com/rogpeppe/go-internal" + packages = [ + "modfile", + "module", + "semver", + ] + pruneopts = "NUT" + revision = "438578804ca6f31be148c27683afc419ce47c06e" + version = "v1.3.0" + +[[projects]] + digest = "1:6792bb72ea0e7112157d02e4e175cd421b43d004a853f56316a19beca6e0c074" + name = "github.com/spf13/afero" + packages = [ + ".", + "mem", + ] + pruneopts = "NUT" + revision = "588a75ec4f32903aa5e39a2619ba6a4631e28424" + version = "v1.2.2" + [[projects]] digest = "1:343d44e06621142ab09ae0c76c1799104cdfddd3ffb445d78b1adf8dc3ffaf3d" name = "github.com/spf13/cobra" @@ -1186,6 +1233,24 @@ revision = "12d98582e72927b6cd0123e2b4e819f9341ce62c" version = "v0.1.10" +[[projects]] + digest = "1:77a19ea61ca4e01817ad2bc3e91689c5097b4b439668127d1fb5d8b95c3aca03" + name = "sigs.k8s.io/controller-tools" + packages = [ + "cmd/controller-gen", + "pkg/crd/generator", + "pkg/crd/util", + "pkg/internal/codegen", + "pkg/internal/codegen/parse", + "pkg/internal/general", + "pkg/rbac", + "pkg/util", + "pkg/webhook", + ] + pruneopts = "NUT" + revision = "b8adde9bc6d7f3fba449d306613a9daed23676c8" + version = "v0.1.10" + [[projects]] digest = "1:8730e0150dfb2b7e173890c8b9868e7a273082ef8e39f4940e3506a481cf895c" name = "sigs.k8s.io/yaml" @@ -1213,7 +1278,6 @@ "github.com/openshift/cluster-api-actuator-pkg/pkg/e2e/operators", "github.com/openshift/cluster-api/pkg/apis/machine/v1beta1", "github.com/openshift/cluster-api/pkg/client/clientset_generated/clientset", - "github.com/openshift/cluster-api/pkg/client/clientset_generated/clientset/scheme", "github.com/openshift/cluster-api/pkg/client/informers_generated/externalversions", "github.com/openshift/cluster-api/pkg/client/informers_generated/externalversions/machine/v1beta1",
"github.com/openshift/cluster-api/pkg/client/listers_generated/machine/v1beta1", @@ -1274,6 +1338,7 @@ "sigs.k8s.io/controller-runtime/pkg/reconcile", "sigs.k8s.io/controller-runtime/pkg/runtime/signals", "sigs.k8s.io/controller-runtime/pkg/source", + "sigs.k8s.io/controller-tools/cmd/controller-gen", ] solver-name = "gps-cdcl" solver-version = 1 diff --git a/Gopkg.toml b/Gopkg.toml index e3a132bb771f1b8bcd8e94616af4930d7cac895a..7ed23ada8ce2898b53cccc228ab839ca88a45c87 100644 --- a/Gopkg.toml +++ b/Gopkg.toml @@ -15,6 +15,7 @@ required = [ "github.com/openshift/cluster-autoscaler-operator/pkg/apis", "github.com/onsi/ginkgo", "github.com/onsi/gomega", + "sigs.k8s.io/controller-tools/cmd/controller-gen", # for crd/rbac generation ] # To override the revision="commit_id" constraint from the cluster-api-actuator-pkg diff --git a/Makefile b/Makefile index e6b5f9694a8685003893e665112c26dcb8d93255..35128a2bfd4bb949702d036898ddbc66c541b5e5 100644 --- a/Makefile +++ b/Makefile @@ -42,6 +42,13 @@ nodelink-controller: machine-healthcheck: $(DOCKER_CMD) ./hack/go-build.sh machine-healthcheck +.PHONY: generate +generate: gen-crd update-codegen + +.PHONY: gen-crd +gen-crd: + $(DOCKER_CMD) ./hack/gen-crd.sh + .PHONY: update-codegen update-codegen: $(DOCKER_CMD) ./hack/update-codegen.sh diff --git a/README.md b/README.md index e7aca58b2776ebf491ed138e4a6b03dec7d0f418..5ddcee2332e3e524cc227fe808bc0daa87f55dce 100644 --- a/README.md +++ b/README.md @@ -114,6 +114,12 @@ $ make nodelink-controller ## Dev +- Generate code (if needed): + + ```sh + $ make generate + ``` + - Build: ```sh diff --git a/hack/gen-crd.sh b/hack/gen-crd.sh new file mode 100755 index 0000000000000000000000000000000000000000..52b9e9db277819f80e5296f10064706bcb11975f --- /dev/null +++ b/hack/gen-crd.sh @@ -0,0 +1,30 @@ +#!/usr/bin/env bash + +set -eu + +echo "Building controller-gen tool..." +go build -o bin/controller-gen github.com/openshift/machine-api-operator/vendor/sigs.k8s.io/controller-tools/cmd/controller-gen + +dir=$(mktemp -d -t XXXXXXXX) +echo $dir +mkdir -p $dir/src/github.com/openshift/machine-api-operator/pkg/apis +mkdir -p $dir/src/github.com/openshift/machine-api-operator/vendor + +cp -r pkg/apis/healthchecking $dir/src/github.com/openshift/machine-api-operator/pkg/apis/. +cp -r vendor/github.com/openshift/cluster-api/pkg/apis/machine $dir/src/github.com/openshift/machine-api-operator/pkg/apis +# Some dependencies need to be copied as well. Otherwise, controller-gen will complain about a non-existing kind Unsupported +cp -r vendor/k8s.io $dir/src/github.com/openshift/machine-api-operator/vendor/. +cp -r vendor/github.com $dir/src/github.com/openshift/machine-api-operator/vendor/.
+ +cwd=$(pwd) +pushd $dir/src/github.com/openshift/machine-api-operator +GOPATH=$dir ${cwd}/bin/controller-gen crd --domain openshift.io +popd + +echo "Copying generated CRDs" +cp $dir/src/github.com/openshift/machine-api-operator/config/crds/healthchecking_v1alpha1_machinehealthcheck.yaml install/0000_30_machine-api-operator_07_machinehealthcheck.crd.yaml +cp $dir/src/github.com/openshift/machine-api-operator/config/crds/machine_v1beta1_machinedeployment.yaml install/0000_30_machine-api-operator_04_machinedeployment.crd.yaml +cp $dir/src/github.com/openshift/machine-api-operator/config/crds/machine_v1beta1_machineset.yaml install/0000_30_machine-api-operator_03_machineset.crd.yaml +cp $dir/src/github.com/openshift/machine-api-operator/config/crds/machine_v1beta1_machine.yaml install/0000_30_machine-api-operator_02_machine.crd.yaml + +rm -rf $dir diff --git a/install/0000_30_machine-api-operator_02_machine.crd.yaml b/install/0000_30_machine-api-operator_02_machine.crd.yaml index 0489f5adfdc68cc04d9f610a8a1aadede2799ac1..16f73383edc2703a41661d42035c2335aa15b780 100644 --- a/install/0000_30_machine-api-operator_02_machine.crd.yaml +++ b/install/0000_30_machine-api-operator_02_machine.crd.yaml @@ -8,26 +8,27 @@ metadata: spec: additionalPrinterColumns: - JSONPath: .status.providerStatus.instanceId - name: Instance description: Instance ID of machine created in AWS + name: Instance type: string - JSONPath: .status.providerStatus.instanceState - name: State description: State of the AWS instance + name: State type: string - JSONPath: .spec.providerSpec.value.instanceType - name: Type description: Type of instance + name: Type type: string - JSONPath: .spec.providerSpec.value.placement.region - name: Region description: Region associated with machine + name: Region type: string - JSONPath: .spec.providerSpec.value.placement.availabilityZone - name: Zone description: Zone associated with machine + name: Zone type: string - JSONPath: .metadata.creationTimestamp + description: Machine age name: Age type: date group: machine.openshift.io @@ -55,19 +56,36 @@ spec: spec: properties: configSource: - description: To populate in the associated Node for dynamic kubelet - config. This field already exists in Node, so any updates to it in - the Machine spec will be automatically copied to the linked NodeRef - from the status. The rest of dynamic kubelet config support should - then work as-is. + description: ConfigSource is used to populate in the associated Node + for dynamic kubelet config. This field already exists in Node, so + any updates to it in the Machine spec will be automatically copied + to the linked NodeRef from the status. The rest of dynamic kubelet + config support should then work as-is. type: object metadata: - description: This ObjectMeta will autopopulate the Node created. Use - this to indicate what labels, annotations, name prefix, etc., should - be used when creating the Node. + description: ObjectMeta will autopopulate the Node created. Use this + to indicate what labels, annotations, name prefix, etc., should be + used when creating the Node. type: object + providerID: + description: ProviderID is the identification ID of the machine provided + by the provider. This field must match the provider ID as seen on + the node object corresponding to this machine. This field is required + by higher level consumers of cluster-api. Example use case is cluster + autoscaler with cluster-api as provider.
Clean-up logic in the autoscaler + compares machines vs. nodes to find machines at the provider which + could not get registered as Kubernetes nodes. With cluster-api as + a generic out-of-tree provider for autoscaler, this field is required + by the autoscaler to be able to have a provider view of the list of machines. + Another list of nodes is queried from the k8s apiserver, and a comparison + is then done to find unregistered machines, which are marked for deletion. + This field will be set by the actuators and consumed by higher level + entities like the autoscaler, which will be interfacing with cluster-api + as a generic provider. + type: string providerSpec: - description: Provider-specific configuration to use during node creation. + description: ProviderSpec details Provider-specific configuration to + use during node creation. properties: value: description: Value is an inlined, serialized representation of the @@ -91,9 +109,9 @@ spec: type: object type: object taints: - description: The full, authoritative list of taints to apply to the - corresponding Node. This list will overwrite any modifications made - to the Node on an ongoing basis. + description: Taints is the full, authoritative list of taints to apply + to the corresponding Node. This list will overwrite any modifications + made to the Node on an ongoing basis. items: type: object type: array @@ -106,11 +124,12 @@ spec: spec missing this field at runtime is invalid. properties: controlPlane: - description: Semantic version of the Kubernetes control plane to - run. This should only be populated when the machine is a master. + description: ControlPlane is the semantic version of the Kubernetes + control plane to run. This should only be populated when the machine + is a control plane. type: string kubelet: - description: Semantic version of kubelet to run + description: Kubelet is the semantic version of kubelet to run type: string required: - kubelet @@ -127,24 +146,35 @@ spec: type: object type: array conditions: - description: 'List of conditions synced from the node conditions of - the corresponding node-object. Machine-controller is responsible for - keeping conditions up-to-date. MachineSet controller will be taking + description: 'Conditions lists the conditions synced from the node conditions + of the corresponding node-object. Machine-controller is responsible + for keeping conditions up-to-date. MachineSet controller will be taking these conditions as a signal to decide if machine is healthy or needs to be replaced. Refer: https://kubernetes.io/docs/concepts/architecture/nodes/#condition' items: type: object type: array errorMessage: + description: ErrorMessage will be set in the event that there is a terminal + problem reconciling the Machine and will contain a more verbose string + suitable for logging and human consumption. This field should not + be set for transitive errors that a controller faces that are expected + to be fixed automatically over time (like service outages), but instead + indicate that something is fundamentally wrong with the Machine's + spec or the configuration of the controller, and that manual intervention + is required. Examples of terminal errors would be invalid combinations + of settings in the spec, values that are unsupported by the controller, + or the responsible controller itself being critically misconfigured. Any + transient errors that occur during the reconciliation of Machines + can be added as events to the Machine object and/or logged in the + controller's output.
type: string errorReason: - description: In the event that there is a terminal problem reconciling - the Machine, both ErrorReason and ErrorMessage will be set. ErrorReason - will be populated with a succinct value suitable for machine interpretation, - while ErrorMessage will contain a more verbose string suitable for - logging and human consumption. These fields should not be set for - transitive errors that a controller faces that are expected to be - fixed automatically over time (like service outages), but instead + description: ErrorReason will be set in the event that there is a terminal + problem reconciling the Machine and will contain a succinct value + suitable for machine interpretation. This field should not be set + for transitive errors that a controller faces that are expected to + be fixed automatically over time (like service outages), but instead indicate that something is fundamentally wrong with the Machine's spec or the configuration of the controller, and that manual intervention is required. Examples of terminal errors would be invalid combinations @@ -166,7 +196,7 @@ spec: last operation. type: string lastUpdated: - description: LastUpdateTime is the timestamp at which LastOperation + description: LastUpdated is the timestamp at which LastOperation API was last-updated. format: date-time type: string @@ -180,41 +210,41 @@ spec: type: string type: object lastUpdated: - description: When was this status last observed + description: LastUpdated identifies when this status was last observed. format: date-time type: string nodeRef: - description: If the corresponding Node exists, this will point to its - object. + description: NodeRef will point to the corresponding Node if it exists. type: object phase: description: Phase represents the current phase of machine actuation. E.g. Pending, Running, Terminating, Failed etc. type: string providerStatus: - description: Provider-specific status. It is recommended that providers - maintain their own versioned API types that should be serialized/deserialized - from this field. + description: ProviderStatus details a Provider-specific status. It is + recommended that providers maintain their own versioned API types + that should be serialized/deserialized from this field. type: object versions: - description: 'The current versions of software on the corresponding - Node (if it exists). This is provided for a few reasons: 1) It is - more convenient than checking the NodeRef, traversing it to the - Node, and finding the appropriate field in Node.Status.NodeInfo (which + description: 'Versions specifies the current versions of software on + the corresponding Node (if it exists). This is provided for a few + reasons: 1) It is more convenient than checking the NodeRef, traversing + it to the Node, and finding the appropriate field in Node.Status.NodeInfo (which uses different field names and formatting). 2) It removes some of the dependency on the structure of the Node, so that if the structure of Node.Status.NodeInfo changes, only machine controllers need to be updated, rather than every client of the Machines API. 3) - There is no other simple way to check the ControlPlane version. + There is no other simple way to check the control plane version. A client would have to connect directly to the apiserver running on the target node in order to find out its version.' properties: controlPlane: - description: Semantic version of the Kubernetes control plane to - run. This should only be populated when the machine is a master. 
+ description: ControlPlane is the semantic version of the Kubernetes + control plane to run. This should only be populated when the machine + is a control plane. type: string kubelet: - description: Semantic version of kubelet to run + description: Kubelet is the semantic version of kubelet to run type: string required: - kubelet diff --git a/install/0000_30_machine-api-operator_03_machineset.crd.yaml b/install/0000_30_machine-api-operator_03_machineset.crd.yaml index 5bebdac2b2641d1261f8dd5abf1aab857207ec9f..c35546dbdf29ced44e5f24317a245c522a8df31b 100644 --- a/install/0000_30_machine-api-operator_03_machineset.crd.yaml +++ b/install/0000_30_machine-api-operator_03_machineset.crd.yaml @@ -6,6 +6,27 @@ metadata: controller-tools.k8s.io: "1.0" name: machinesets.machine.openshift.io spec: + additionalPrinterColumns: + - JSONPath: .spec.replicas + description: Desired Replicas + name: Desired + type: integer + - JSONPath: .status.replicas + description: Current Replicas + name: Current + type: integer + - JSONPath: .status.readyReplicas + description: Ready Replicas + name: Ready + type: integer + - JSONPath: .status.availableReplicas + description: Observed number of available replicas + name: Available + type: string + - JSONPath: .metadata.creationTimestamp + description: Machineset age + name: Age + type: date group: machine.openshift.io names: kind: MachineSet @@ -64,20 +85,39 @@ spec: More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status' properties: configSource: - description: To populate in the associated Node for dynamic - kubelet config. This field already exists in Node, so any - updates to it in the Machine spec will be automatically copied - to the linked NodeRef from the status. The rest of dynamic - kubelet config support should then work as-is. + description: ConfigSource is used to populate in the associated + Node for dynamic kubelet config. This field already exists + in Node, so any updates to it in the Machine spec will be + automatically copied to the linked NodeRef from the status. + The rest of dynamic kubelet config support should then work + as-is. type: object metadata: - description: This ObjectMeta will autopopulate the Node created. + description: ObjectMeta will autopopulate the Node created. Use this to indicate what labels, annotations, name prefix, etc., should be used when creating the Node. type: object + providerID: + description: ProviderID is the identification ID of the machine + provided by the provider. This field must match the provider + ID as seen on the node object corresponding to this machine. + This field is required by higher level consumers of cluster-api. + Example use case is cluster autoscaler with cluster-api as + provider. Clean-up logic in the autoscaler compares machines + vs. nodes to find machines at the provider which could not + get registered as Kubernetes nodes. With cluster-api as a + generic out-of-tree provider for autoscaler, this field is + required by the autoscaler to be able to have a provider view + of the list of machines. Another list of nodes is queried + from the k8s apiserver, and a comparison is then done to find + unregistered machines, which are marked for deletion. This + field will be set by the actuators and consumed by higher + level entities like the autoscaler, which will be interfacing + with cluster-api as a generic provider. + type: string providerSpec: - description: Provider-specific configuration to use during node - creation.
+ description: ProviderSpec details Provider-specific configuration + to use during node creation. properties: value: description: Value is an inlined, serialized representation @@ -102,9 +142,9 @@ spec: type: object type: object taints: - description: The full, authoritative list of taints to apply - to the corresponding Node. This list will overwrite any modifications - made to the Node on an ongoing basis. + description: Taints is the full, authoritative list of taints + to apply to the corresponding Node. This list will overwrite + any modifications made to the Node on an ongoing basis. items: type: object type: array @@ -118,12 +158,13 @@ spec: this field at runtime is invalid. properties: controlPlane: - description: Semantic version of the Kubernetes control - plane to run. This should only be populated when the machine - is a master. + description: ControlPlane is the semantic version of the + Kubernetes control plane to run. This should only be populated + when the machine is a control plane. type: string kubelet: - description: Semantic version of kubelet to run + description: Kubelet is the semantic version of kubelet + to run type: string required: - kubelet @@ -184,26 +225,6 @@ spec: - replicas type: object version: v1beta1 - additionalPrinterColumns: - - JSONPath: .spec.replicas - description: Desired Replicas - name: Desired - type: integer - - JSONPath: .status.replicas - description: Current Replicas - name: Current - type: integer - - JSONPath: .status.readyReplicas - description: Ready Replicas - name: Ready - type: integer - - JSONPath: .status.availableReplicas - name: Available - description: Observed number of available replicas - type: string - - JSONPath: .metadata.creationTimestamp - name: Age - type: date status: acceptedNames: kind: "" diff --git a/install/0000_30_machine-api-operator_04_machinedeployment.crd.yaml b/install/0000_30_machine-api-operator_04_machinedeployment.crd.yaml index 83b3aa648f89c6c02458b752fa451231c707be00..3a0844f64123cdfb3b6f6c6bc4b2a4c1da632927 100644 --- a/install/0000_30_machine-api-operator_04_machinedeployment.crd.yaml +++ b/install/0000_30_machine-api-operator_04_machinedeployment.crd.yaml @@ -77,6 +77,9 @@ spec: = RollingUpdate. properties: maxSurge: + anyOf: + - type: string + - type: integer description: 'The maximum number of machines that can be scheduled above the desired number of machines. Value can be an absolute number (ex: 5) or a percentage of desired machines (ex: 10%). @@ -89,10 +92,10 @@ spec: new MachineSet can be scaled up further, ensuring that total number of machines running at any time during the update is at most 130% of desired machines.' - oneOf: + maxUnavailable: + anyOf: - type: string - type: integer - maxUnavailable: description: 'The maximum number of machines that can be unavailable during the update. Value can be an absolute number (ex: 5) or a percentage of desired machines (ex: 10%). Absolute number @@ -104,9 +107,6 @@ spec: down further, followed by scaling up the new MachineSet, ensuring that the total number of machines available at all times during the update is at least 70% of desired machines.' - oneOf: - - type: string - - type: integer type: object type: description: Type of deployment. Currently the only supported strategy @@ -124,20 +124,39 @@ spec: More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status' properties: configSource: - description: To populate in the associated Node for dynamic - kubelet config. 
This field already exists in Node, so any - updates to it in the Machine spec will be automatically copied - to the linked NodeRef from the status. The rest of dynamic - kubelet config support should then work as-is. + description: ConfigSource is used to populate in the associated + Node for dynamic kubelet config. This field already exists + in Node, so any updates to it in the Machine spec will be + automatically copied to the linked NodeRef from the status. + The rest of dynamic kubelet config support should then work + as-is. type: object metadata: - description: This ObjectMeta will autopopulate the Node created. + description: ObjectMeta will autopopulate the Node created. Use this to indicate what labels, annotations, name prefix, etc., should be used when creating the Node. type: object + providerID: + description: ProviderID is the identification ID of the machine + provided by the provider. This field must match the provider + ID as seen on the node object corresponding to this machine. + This field is required by higher level consumers of cluster-api. + Example use case is cluster autoscaler with cluster-api as + provider. Clean-up logic in the autoscaler compares machines + vs. nodes to find machines at the provider which could not + get registered as Kubernetes nodes. With cluster-api as a + generic out-of-tree provider for autoscaler, this field is + required by the autoscaler to be able to have a provider view + of the list of machines. Another list of nodes is queried + from the k8s apiserver, and a comparison is then done to find + unregistered machines, which are marked for deletion. This + field will be set by the actuators and consumed by higher + level entities like the autoscaler, which will be interfacing + with cluster-api as a generic provider. + type: string providerSpec: - description: Provider-specific configuration to use during node - creation. + description: ProviderSpec details Provider-specific configuration + to use during node creation. properties: value: description: Value is an inlined, serialized representation @@ -162,9 +181,9 @@ spec: type: object type: object taints: - description: The full, authoritative list of taints to apply - to the corresponding Node. This list will overwrite any modifications - made to the Node on an ongoing basis. + description: Taints is the full, authoritative list of taints + to apply to the corresponding Node. This list will overwrite + any modifications made to the Node on an ongoing basis. items: type: object type: array @@ -178,12 +197,13 @@ spec: this field at runtime is invalid. properties: controlPlane: - description: Semantic version of the Kubernetes control - plane to run. This should only be populated when the machine - is a master. + description: ControlPlane is the semantic version of the + Kubernetes control plane to run. This should only be populated + when the machine is a control plane.
type: string kubelet: - description: Semantic version of kubelet to run + description: Kubelet is the semantic version of kubelet + to run type: string required: - kubelet diff --git a/install/0000_30_machine-api-operator_07_machinehealthcheck.crd.yaml b/install/0000_30_machine-api-operator_07_machinehealthcheck.crd.yaml index 4034d1c7ae7d1b932e0cba7d03671d0435c44673..032f370a9621ed9d024caa92caf366c42be8e40a 100644 --- a/install/0000_30_machine-api-operator_07_machinehealthcheck.crd.yaml +++ b/install/0000_30_machine-api-operator_07_machinehealthcheck.crd.yaml @@ -1,22 +1,44 @@ apiVersion: apiextensions.k8s.io/v1beta1 kind: CustomResourceDefinition metadata: + creationTimestamp: null + labels: + controller-tools.k8s.io: "1.0" name: machinehealthchecks.healthchecking.openshift.io spec: group: healthchecking.openshift.io names: kind: MachineHealthCheck - listKind: MachineHealthCheckList plural: machinehealthchecks - singular: machinehealthcheck scope: Namespaced validation: openAPIV3Schema: properties: apiVersion: + description: 'APIVersion defines the versioned schema of this representation + of an object. Servers should convert recognized schemas to the latest + internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#resources' type: string kind: + description: 'Kind is a string value representing the REST resource this + object represents. Servers may infer this from the endpoint the client + submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds' type: string metadata: type: object + spec: + properties: + selector: + type: object + required: + - selector + type: object + status: + type: object version: v1alpha1 +status: + acceptedNames: + kind: "" + plural: "" + conditions: [] + storedVersions: [] diff --git a/vendor/github.com/gobuffalo/envy/LICENSE.txt b/vendor/github.com/gobuffalo/envy/LICENSE.txt new file mode 100644 index 0000000000000000000000000000000000000000..123ddc0d80436ce3b28f77a96b2bff6ab947e456 --- /dev/null +++ b/vendor/github.com/gobuffalo/envy/LICENSE.txt @@ -0,0 +1,8 @@ +The MIT License (MIT) +Copyright (c) 2018 Mark Bates + +Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
diff --git a/vendor/github.com/gobuffalo/envy/envy.go b/vendor/github.com/gobuffalo/envy/envy.go new file mode 100644 index 0000000000000000000000000000000000000000..d3995f45577478d43db4638eb0e8233b49420c27 --- /dev/null +++ b/vendor/github.com/gobuffalo/envy/envy.go @@ -0,0 +1,268 @@ +/* +package envy makes working with ENV variables in Go trivial. + +* Get ENV variables with default values. +* Set ENV variables safely without affecting the underlying system. +* Temporarily change ENV vars; useful for testing. +* Map all of the key/values in the ENV. +* Loads .env files (by using [godotenv](https://github.com/joho/godotenv/)) +* More! +*/ +package envy + +import ( + "errors" + "flag" + "fmt" + "io/ioutil" + "os" + "os/exec" + "path/filepath" + "runtime" + "strings" + "sync" + + "github.com/joho/godotenv" + "github.com/rogpeppe/go-internal/modfile" +) + +var gil = &sync.RWMutex{} +var env = map[string]string{} + +// GO111MODULE is ENV for turning mods on/off +const GO111MODULE = "GO111MODULE" + +func init() { + Load() + loadEnv() +} + +// Load the ENV variables to the env map +func loadEnv() { + gil.Lock() + defer gil.Unlock() + + if os.Getenv("GO_ENV") == "" { + // if the flag "test.v" is *defined*, we're running as a unit test. Note that we don't care + // about v.Value (verbose test mode); we just want to know if the test environment has defined + // it. It's also possible that the flags are not yet fully parsed (i.e. flag.Parsed() == false), + // so we could not depend on v.Value anyway. + // + if v := flag.Lookup("test.v"); v != nil { + env["GO_ENV"] = "test" + } + } + + // set the GOPATH if using >= 1.8 and the GOPATH isn't set + if os.Getenv("GOPATH") == "" { + out, err := exec.Command("go", "env", "GOPATH").Output() + if err == nil { + gp := strings.TrimSpace(string(out)) + os.Setenv("GOPATH", gp) + } + } + + for _, e := range os.Environ() { + pair := strings.Split(e, "=") + env[pair[0]] = os.Getenv(pair[0]) + } +} + +func Mods() bool { + return Get(GO111MODULE, "off") == "on" +} + +// Reload the ENV variables. Useful if +// an external ENV manager has been used +func Reload() { + env = map[string]string{} + loadEnv() +} + +// Load .env files. Files will be loaded in the same order that are received. +// Redefined vars will override previously existing values. +// IE: envy.Load(".env", "test_env/.env") will result in DIR=test_env +// If no arg passed, it will try to load a .env file. +func Load(files ...string) error { + + // If no files received, load the default one + if len(files) == 0 { + err := godotenv.Overload() + if err == nil { + Reload() + } + return err + } + + // We received a list of files + for _, file := range files { + + // Check if it exists or we can access + if _, err := os.Stat(file); err != nil { + // It does not exist or we can not access. + // Return and stop loading + return err + } + + // It exists and we have permission. Load it + if err := godotenv.Overload(file); err != nil { + return err + } + + // Reload the env so all new changes are noticed + Reload() + + } + return nil +} + +// Get a value from the ENV. If it doesn't exist the +// default value will be returned. +func Get(key string, value string) string { + gil.RLock() + defer gil.RUnlock() + if v, ok := env[key]; ok { + return v + } + return value +} + +// Get a value from the ENV. 
If it doesn't exist +// an error will be returned +func MustGet(key string) (string, error) { + gil.RLock() + defer gil.RUnlock() + if v, ok := env[key]; ok { + return v, nil + } + return "", fmt.Errorf("could not find ENV var with %s", key) +} + +// Set a value into the ENV. This is NOT permanent. It will +// only affect values accessed through envy. +func Set(key string, value string) { + gil.Lock() + defer gil.Unlock() + env[key] = value +} + +// MustSet the value into the underlying ENV, as well as envy. +// This may return an error if there is a problem setting the +// underlying ENV value. +func MustSet(key string, value string) error { + gil.Lock() + defer gil.Unlock() + err := os.Setenv(key, value) + if err != nil { + return err + } + env[key] = value + return nil +} + +// Map all of the keys/values set in envy. +func Map() map[string]string { + gil.RLock() + defer gil.RUnlock() + cp := map[string]string{} + for k, v := range env { + cp[k] = v + } + return env +} + +// Temp makes a copy of the values and allows operation on +// those values temporarily during the run of the function. +// At the end of the function run the copy is discarded and +// the original values are replaced. This is useful for testing. +// Warning: This function is NOT safe to use from a goroutine or +// from code which may access any Get or Set function from a goroutine +func Temp(f func()) { + oenv := env + env = map[string]string{} + for k, v := range oenv { + env[k] = v + } + defer func() { env = oenv }() + f() +} + +func GoPath() string { + return Get("GOPATH", "") +} + +func GoBin() string { + return Get("GO_BIN", "go") +} + +func InGoPath() bool { + pwd, _ := os.Getwd() + for _, p := range GoPaths() { + if strings.HasPrefix(pwd, p) { + return true + } + } + return false +} + +// GoPaths returns all possible GOPATHS that are set. +func GoPaths() []string { + gp := Get("GOPATH", "") + if runtime.GOOS == "windows" { + return strings.Split(gp, ";") // Windows uses a different separator + } + return strings.Split(gp, ":") +} + +func importPath(path string) string { + path = strings.TrimPrefix(path, "/private") + for _, gopath := range GoPaths() { + srcpath := filepath.Join(gopath, "src") + rel, err := filepath.Rel(srcpath, path) + if err == nil { + return filepath.ToSlash(rel) + } + } + + // fallback to trim + rel := strings.TrimPrefix(path, filepath.Join(GoPath(), "src")) + rel = strings.TrimPrefix(rel, string(filepath.Separator)) + return filepath.ToSlash(rel) +} + +// CurrentModule will attempt to return the module name from `go.mod` if +// modules are enabled. +// If modules are not enabled it will fallback to using CurrentPackage instead. +func CurrentModule() (string, error) { + if !Mods() { + return CurrentPackage(), nil + } + moddata, err := ioutil.ReadFile("go.mod") + if err != nil { + return "", errors.New("go.mod cannot be read or does not exist while go module is enabled") + } + packagePath := modfile.ModulePath(moddata) + if packagePath == "" { + return "", errors.New("go.mod is malformed") + } + return packagePath, nil +} + +// CurrentPackage attempts to figure out the current package name from the PWD +// Use CurrentModule for a more accurate package name. 
+func CurrentPackage() string { + if Mods() { + } + pwd, _ := os.Getwd() + return importPath(pwd) +} + +func Environ() []string { + gil.RLock() + defer gil.RUnlock() + var e []string + for k, v := range env { + e = append(e, fmt.Sprintf("%s=%s", k, v)) + } + return e +} diff --git a/vendor/github.com/gobuffalo/envy/version.go b/vendor/github.com/gobuffalo/envy/version.go new file mode 100644 index 0000000000000000000000000000000000000000..0bff116a5db610a8ba793b004a0eadb8946b968a --- /dev/null +++ b/vendor/github.com/gobuffalo/envy/version.go @@ -0,0 +1,3 @@ +package envy + +const Version = "v1.6.15" diff --git a/vendor/github.com/joho/godotenv/LICENCE b/vendor/github.com/joho/godotenv/LICENCE new file mode 100644 index 0000000000000000000000000000000000000000..e7ddd51be9033398af8a84271f09eefdbedcdac5 --- /dev/null +++ b/vendor/github.com/joho/godotenv/LICENCE @@ -0,0 +1,23 @@ +Copyright (c) 2013 John Barton + +MIT License + +Permission is hereby granted, free of charge, to any person obtaining +a copy of this software and associated documentation files (the +"Software"), to deal in the Software without restriction, including +without limitation the rights to use, copy, modify, merge, publish, +distribute, sublicense, and/or sell copies of the Software, and to +permit persons to whom the Software is furnished to do so, subject to +the following conditions: + +The above copyright notice and this permission notice shall be +included in all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, +EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF +MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND +NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE +LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION +OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION +WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. + diff --git a/vendor/github.com/joho/godotenv/godotenv.go b/vendor/github.com/joho/godotenv/godotenv.go new file mode 100644 index 0000000000000000000000000000000000000000..29b436c77c0889321ceb30d243eda336d90129d9 --- /dev/null +++ b/vendor/github.com/joho/godotenv/godotenv.go @@ -0,0 +1,346 @@ +// Package godotenv is a go port of the ruby dotenv library (https://github.com/bkeepers/dotenv) +// +// Examples/readme can be found on the github page at https://github.com/joho/godotenv +// +// The TL;DR is that you make a .env file that looks something like +// +// SOME_ENV_VAR=somevalue +// +// and then in your go code you can call +// +// godotenv.Load() +// +// and all the env vars declared in .env will be available through os.Getenv("SOME_ENV_VAR") +package godotenv + +import ( + "bufio" + "errors" + "fmt" + "io" + "os" + "os/exec" + "regexp" + "sort" + "strings" +) + +const doubleQuoteSpecialChars = "\\\n\r\"!$`" + +// Load will read your env file(s) and load them into ENV for this process. 
+// +// Call this function as close as possible to the start of your program (ideally in main) +// +// If you call Load without any args it will default to loading .env in the current path +// +// You can otherwise tell it which files to load (there can be more than one) like +// +// godotenv.Load("fileone", "filetwo") +// +// It's important to note that it WILL NOT OVERRIDE an env variable that already exists - consider the .env file to set dev vars or sensible defaults +func Load(filenames ...string) (err error) { + filenames = filenamesOrDefault(filenames) + + for _, filename := range filenames { + err = loadFile(filename, false) + if err != nil { + return // return early on a spazout + } + } + return +} + +// Overload will read your env file(s) and load them into ENV for this process. +// +// Call this function as close as possible to the start of your program (ideally in main) +// +// If you call Overload without any args it will default to loading .env in the current path +// +// You can otherwise tell it which files to load (there can be more than one) like +// +// godotenv.Overload("fileone", "filetwo") +// +// It's important to note this WILL OVERRIDE an env variable that already exists - consider the .env file to forcefilly set all vars. +func Overload(filenames ...string) (err error) { + filenames = filenamesOrDefault(filenames) + + for _, filename := range filenames { + err = loadFile(filename, true) + if err != nil { + return // return early on a spazout + } + } + return +} + +// Read all env (with same file loading semantics as Load) but return values as +// a map rather than automatically writing values into env +func Read(filenames ...string) (envMap map[string]string, err error) { + filenames = filenamesOrDefault(filenames) + envMap = make(map[string]string) + + for _, filename := range filenames { + individualEnvMap, individualErr := readFile(filename) + + if individualErr != nil { + err = individualErr + return // return early on a spazout + } + + for key, value := range individualEnvMap { + envMap[key] = value + } + } + + return +} + +// Parse reads an env file from io.Reader, returning a map of keys and values. +func Parse(r io.Reader) (envMap map[string]string, err error) { + envMap = make(map[string]string) + + var lines []string + scanner := bufio.NewScanner(r) + for scanner.Scan() { + lines = append(lines, scanner.Text()) + } + + if err = scanner.Err(); err != nil { + return + } + + for _, fullLine := range lines { + if !isIgnoredLine(fullLine) { + var key, value string + key, value, err = parseLine(fullLine, envMap) + + if err != nil { + return + } + envMap[key] = value + } + } + return +} + +//Unmarshal reads an env file from a string, returning a map of keys and values. +func Unmarshal(str string) (envMap map[string]string, err error) { + return Parse(strings.NewReader(str)) +} + +// Exec loads env vars from the specified filenames (empty map falls back to default) +// then executes the cmd specified. +// +// Simply hooks up os.Stdin/err/out to the command and calls Run() +// +// If you want more fine grained control over your command it's recommended +// that you use `Load()` or `Read()` and the `os/exec` package yourself. +func Exec(filenames []string, cmd string, cmdArgs []string) error { + Load(filenames...) + + command := exec.Command(cmd, cmdArgs...) 
+ command.Stdin = os.Stdin + command.Stdout = os.Stdout + command.Stderr = os.Stderr + return command.Run() +} + +// Write serializes the given environment and writes it to a file +func Write(envMap map[string]string, filename string) error { + content, error := Marshal(envMap) + if error != nil { + return error + } + file, error := os.Create(filename) + if error != nil { + return error + } + _, err := file.WriteString(content) + return err +} + +// Marshal outputs the given environment as a dotenv-formatted environment file. +// Each line is in the format: KEY="VALUE" where VALUE is backslash-escaped. +func Marshal(envMap map[string]string) (string, error) { + lines := make([]string, 0, len(envMap)) + for k, v := range envMap { + lines = append(lines, fmt.Sprintf(`%s="%s"`, k, doubleQuoteEscape(v))) + } + sort.Strings(lines) + return strings.Join(lines, "\n"), nil +} + +func filenamesOrDefault(filenames []string) []string { + if len(filenames) == 0 { + return []string{".env"} + } + return filenames +} + +func loadFile(filename string, overload bool) error { + envMap, err := readFile(filename) + if err != nil { + return err + } + + currentEnv := map[string]bool{} + rawEnv := os.Environ() + for _, rawEnvLine := range rawEnv { + key := strings.Split(rawEnvLine, "=")[0] + currentEnv[key] = true + } + + for key, value := range envMap { + if !currentEnv[key] || overload { + os.Setenv(key, value) + } + } + + return nil +} + +func readFile(filename string) (envMap map[string]string, err error) { + file, err := os.Open(filename) + if err != nil { + return + } + defer file.Close() + + return Parse(file) +} + +func parseLine(line string, envMap map[string]string) (key string, value string, err error) { + if len(line) == 0 { + err = errors.New("zero length string") + return + } + + // ditch the comments (but keep quoted hashes) + if strings.Contains(line, "#") { + segmentsBetweenHashes := strings.Split(line, "#") + quotesAreOpen := false + var segmentsToKeep []string + for _, segment := range segmentsBetweenHashes { + if strings.Count(segment, "\"") == 1 || strings.Count(segment, "'") == 1 { + if quotesAreOpen { + quotesAreOpen = false + segmentsToKeep = append(segmentsToKeep, segment) + } else { + quotesAreOpen = true + } + } + + if len(segmentsToKeep) == 0 || quotesAreOpen { + segmentsToKeep = append(segmentsToKeep, segment) + } + } + + line = strings.Join(segmentsToKeep, "#") + } + + firstEquals := strings.Index(line, "=") + firstColon := strings.Index(line, ":") + splitString := strings.SplitN(line, "=", 2) + if firstColon != -1 && (firstColon < firstEquals || firstEquals == -1) { + //this is a yaml-style line + splitString = strings.SplitN(line, ":", 2) + } + + if len(splitString) != 2 { + err = errors.New("Can't separate key from value") + return + } + + // Parse the key + key = splitString[0] + if strings.HasPrefix(key, "export") { + key = strings.TrimPrefix(key, "export") + } + key = strings.Trim(key, " ") + + // Parse the value + value = parseValue(splitString[1], envMap) + return +} + +func parseValue(value string, envMap map[string]string) string { + + // trim + value = strings.Trim(value, " ") + + // check if we've got quoted values or possible escapes + if len(value) > 1 { + rs := regexp.MustCompile(`\A'(.*)'\z`) + singleQuotes := rs.FindStringSubmatch(value) + + rd := regexp.MustCompile(`\A"(.*)"\z`) + doubleQuotes := rd.FindStringSubmatch(value) + + if singleQuotes != nil || doubleQuotes != nil { + // pull the quotes off the edges + value = value[1 : len(value)-1] + } + + if 
doubleQuotes != nil { + // expand newlines + escapeRegex := regexp.MustCompile(`\\.`) + value = escapeRegex.ReplaceAllStringFunc(value, func(match string) string { + c := strings.TrimPrefix(match, `\`) + switch c { + case "n": + return "\n" + case "r": + return "\r" + default: + return match + } + }) + // unescape characters + e := regexp.MustCompile(`\\([^$])`) + value = e.ReplaceAllString(value, "$1") + } + + if singleQuotes == nil { + value = expandVariables(value, envMap) + } + } + + return value +} + +func expandVariables(v string, m map[string]string) string { + r := regexp.MustCompile(`(\\)?(\$)(\()?\{?([A-Z0-9_]+)?\}?`) + + return r.ReplaceAllStringFunc(v, func(s string) string { + submatch := r.FindStringSubmatch(s) + + if submatch == nil { + return s + } + if submatch[1] == "\\" || submatch[2] == "(" { + return submatch[0][1:] + } else if submatch[4] != "" { + return m[submatch[4]] + } + return s + }) +} + +func isIgnoredLine(line string) bool { + trimmedLine := strings.Trim(line, " \n\t") + return len(trimmedLine) == 0 || strings.HasPrefix(trimmedLine, "#") +} + +func doubleQuoteEscape(line string) string { + for _, c := range doubleQuoteSpecialChars { + toReplace := "\\" + string(c) + if c == '\n' { + toReplace = `\n` + } + if c == '\r' { + toReplace = `\r` + } + line = strings.Replace(line, string(c), toReplace, -1) + } + return line +} diff --git a/vendor/github.com/markbates/inflect/LICENCE b/vendor/github.com/markbates/inflect/LICENCE new file mode 100644 index 0000000000000000000000000000000000000000..8a36b944a5ee4ab9a42c4803838a5a15c476e6a3 --- /dev/null +++ b/vendor/github.com/markbates/inflect/LICENCE @@ -0,0 +1,7 @@ +Copyright (c) 2011 Chris Farmiloe + +Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
diff --git a/vendor/github.com/markbates/inflect/helpers.go b/vendor/github.com/markbates/inflect/helpers.go new file mode 100644 index 0000000000000000000000000000000000000000..24050c70a04f72e08579fb76d013cad49536ee0c --- /dev/null +++ b/vendor/github.com/markbates/inflect/helpers.go @@ -0,0 +1,19 @@ +package inflect + +//Helpers is a map of the helper names with its corresponding inflect function +var Helpers = map[string]interface{}{ + "asciffy": Asciify, + "camelize": Camelize, + "camelize_down_first": CamelizeDownFirst, + "capitalize": Capitalize, + "dasherize": Dasherize, + "humanize": Humanize, + "ordinalize": Ordinalize, + "parameterize": Parameterize, + "pluralize": Pluralize, + "pluralize_with_size": PluralizeWithSize, + "singularize": Singularize, + "tableize": Tableize, + "typeify": Typeify, + "underscore": Underscore, +} diff --git a/vendor/github.com/markbates/inflect/inflect.go b/vendor/github.com/markbates/inflect/inflect.go new file mode 100644 index 0000000000000000000000000000000000000000..9b6776c191ca5666fa28f223397546c61c57a952 --- /dev/null +++ b/vendor/github.com/markbates/inflect/inflect.go @@ -0,0 +1,892 @@ +package inflect + +import ( + "bytes" + "encoding/json" + "fmt" + "io" + "io/ioutil" + "os" + "path/filepath" + "regexp" + "strconv" + "strings" + "unicode" + "unicode/utf8" +) + +// baseAcronyms comes from https://en.wikipedia.org/wiki/List_of_information_technology_acronymss +const baseAcronyms = `JSON,JWT,ID,UUID,SQL,ACK,ACL,ADSL,AES,ANSI,API,ARP,ATM,BGP,BSS,CAT,CCITT,CHAP,CIDR,CIR,CLI,CPE,CPU,CRC,CRT,CSMA,CMOS,DCE,DEC,DES,DHCP,DNS,DRAM,DSL,DSLAM,DTE,DMI,EHA,EIA,EIGRP,EOF,ESS,FCC,FCS,FDDI,FTP,GBIC,gbps,GEPOF,HDLC,HTTP,HTTPS,IANA,ICMP,IDF,IDS,IEEE,IETF,IMAP,IP,IPS,ISDN,ISP,kbps,LACP,LAN,LAPB,LAPF,LLC,MAC,MAN,Mbps,MC,MDF,MIB,MoCA,MPLS,MTU,NAC,NAT,NBMA,NIC,NRZ,NRZI,NVRAM,OSI,OSPF,OUI,PAP,PAT,PC,PIM,PIM,PCM,PDU,POP3,POP,POTS,PPP,PPTP,PTT,PVST,RADIUS,RAM,RARP,RFC,RIP,RLL,ROM,RSTP,RTP,RCP,SDLC,SFD,SFP,SLARP,SLIP,SMTP,SNA,SNAP,SNMP,SOF,SRAM,SSH,SSID,STP,SYN,TDM,TFTP,TIA,TOFU,UDP,URL,URI,USB,UTP,VC,VLAN,VLSM,VPN,W3C,WAN,WEP,WiFi,WPA,WWW` + +// Rule used by rulesets +type Rule struct { + suffix string + replacement string + exact bool +} + +// Ruleset a Ruleset is the config of pluralization rules +// you can extend the rules with the Add* methods +type Ruleset struct { + uncountables map[string]bool + plurals []*Rule + singulars []*Rule + humans []*Rule + acronyms []*Rule +} + +// NewRuleset creates a blank ruleset. 
Unless you are going to +// build your own rules from scratch you probably +// won't need this and can just use the defaultRuleset +// via the global inflect.* methods +func NewRuleset() *Ruleset { + rs := new(Ruleset) + rs.uncountables = make(map[string]bool) + rs.plurals = make([]*Rule, 0) + rs.singulars = make([]*Rule, 0) + rs.humans = make([]*Rule, 0) + rs.acronyms = make([]*Rule, 0) + return rs +} + +// NewDefaultRuleset creates a new ruleset and load it with the default +// set of common English pluralization rules +func NewDefaultRuleset() *Ruleset { + rs := NewRuleset() + rs.AddPlural("movie", "movies") + rs.AddPlural("s", "s") + rs.AddPlural("testis", "testes") + rs.AddPlural("axis", "axes") + rs.AddPlural("octopus", "octopi") + rs.AddPlural("virus", "viri") + rs.AddPlural("octopi", "octopi") + rs.AddPlural("viri", "viri") + rs.AddPlural("alias", "aliases") + rs.AddPlural("status", "statuses") + rs.AddPlural("Status", "Statuses") + rs.AddPlural("campus", "campuses") + rs.AddPlural("bus", "buses") + rs.AddPlural("buffalo", "buffaloes") + rs.AddPlural("tomato", "tomatoes") + rs.AddPlural("tum", "ta") + rs.AddPlural("ium", "ia") + rs.AddPlural("ta", "ta") + rs.AddPlural("ia", "ia") + rs.AddPlural("sis", "ses") + rs.AddPlural("lf", "lves") + rs.AddPlural("rf", "rves") + rs.AddPlural("afe", "aves") + rs.AddPlural("bfe", "bves") + rs.AddPlural("cfe", "cves") + rs.AddPlural("dfe", "dves") + rs.AddPlural("efe", "eves") + rs.AddPlural("gfe", "gves") + rs.AddPlural("hfe", "hves") + rs.AddPlural("ife", "ives") + rs.AddPlural("jfe", "jves") + rs.AddPlural("kfe", "kves") + rs.AddPlural("lfe", "lves") + rs.AddPlural("mfe", "mves") + rs.AddPlural("nfe", "nves") + rs.AddPlural("ofe", "oves") + rs.AddPlural("pfe", "pves") + rs.AddPlural("qfe", "qves") + rs.AddPlural("rfe", "rves") + rs.AddPlural("sfe", "sves") + rs.AddPlural("tfe", "tves") + rs.AddPlural("ufe", "uves") + rs.AddPlural("vfe", "vves") + rs.AddPlural("wfe", "wves") + rs.AddPlural("xfe", "xves") + rs.AddPlural("yfe", "yves") + rs.AddPlural("zfe", "zves") + rs.AddPlural("hive", "hives") + rs.AddPlural("quy", "quies") + rs.AddPlural("by", "bies") + rs.AddPlural("cy", "cies") + rs.AddPlural("dy", "dies") + rs.AddPlural("fy", "fies") + rs.AddPlural("gy", "gies") + rs.AddPlural("hy", "hies") + rs.AddPlural("jy", "jies") + rs.AddPlural("ky", "kies") + rs.AddPlural("ly", "lies") + rs.AddPlural("my", "mies") + rs.AddPlural("ny", "nies") + rs.AddPlural("py", "pies") + rs.AddPlural("qy", "qies") + rs.AddPlural("ry", "ries") + rs.AddPlural("sy", "sies") + rs.AddPlural("ty", "ties") + rs.AddPlural("vy", "vies") + rs.AddPlural("wy", "wies") + rs.AddPlural("xy", "xies") + rs.AddPlural("zy", "zies") + rs.AddPlural("x", "xes") + rs.AddPlural("ch", "ches") + rs.AddPlural("ss", "sses") + rs.AddPlural("sh", "shes") + rs.AddPlural("matrix", "matrices") + rs.AddPlural("vertix", "vertices") + rs.AddPlural("indix", "indices") + rs.AddPlural("matrex", "matrices") + rs.AddPlural("vertex", "vertices") + rs.AddPlural("index", "indices") + rs.AddPlural("mouse", "mice") + rs.AddPlural("louse", "lice") + rs.AddPlural("mice", "mice") + rs.AddPlural("lice", "lice") + rs.AddPlural("ress", "resses") + rs.AddPluralExact("ox", "oxen", true) + rs.AddPluralExact("oxen", "oxen", true) + rs.AddPluralExact("quiz", "quizzes", true) + rs.AddSingular("s", "") + rs.AddSingular("ss", "ss") + rs.AddSingular("news", "news") + rs.AddSingular("ta", "tum") + rs.AddSingular("ia", "ium") + rs.AddSingular("analyses", "analysis") + rs.AddSingular("bases", "basis") + 
rs.AddSingularExact("basis", "basis", true) + rs.AddSingular("diagnoses", "diagnosis") + rs.AddSingularExact("diagnosis", "diagnosis", true) + rs.AddSingular("parentheses", "parenthesis") + rs.AddSingular("prognoses", "prognosis") + rs.AddSingular("synopses", "synopsis") + rs.AddSingular("theses", "thesis") + rs.AddSingular("analyses", "analysis") + rs.AddSingularExact("analysis", "analysis", true) + rs.AddSingular("ovies", "ovie") + rs.AddSingular("aves", "afe") + rs.AddSingular("bves", "bfe") + rs.AddSingular("cves", "cfe") + rs.AddSingular("dves", "dfe") + rs.AddSingular("eves", "efe") + rs.AddSingular("gves", "gfe") + rs.AddSingular("hves", "hfe") + rs.AddSingular("ives", "ife") + rs.AddSingular("jves", "jfe") + rs.AddSingular("kves", "kfe") + rs.AddSingular("lves", "lfe") + rs.AddSingular("mves", "mfe") + rs.AddSingular("nves", "nfe") + rs.AddSingular("oves", "ofe") + rs.AddSingular("pves", "pfe") + rs.AddSingular("qves", "qfe") + rs.AddSingular("rves", "rfe") + rs.AddSingular("sves", "sfe") + rs.AddSingular("tves", "tfe") + rs.AddSingular("uves", "ufe") + rs.AddSingular("vves", "vfe") + rs.AddSingular("wves", "wfe") + rs.AddSingular("xves", "xfe") + rs.AddSingular("yves", "yfe") + rs.AddSingular("zves", "zfe") + rs.AddSingular("hives", "hive") + rs.AddSingular("tives", "tive") + rs.AddSingular("lves", "lf") + rs.AddSingular("rves", "rf") + rs.AddSingular("quies", "quy") + rs.AddSingular("bies", "by") + rs.AddSingular("cies", "cy") + rs.AddSingular("dies", "dy") + rs.AddSingular("fies", "fy") + rs.AddSingular("gies", "gy") + rs.AddSingular("hies", "hy") + rs.AddSingular("jies", "jy") + rs.AddSingular("kies", "ky") + rs.AddSingular("lies", "ly") + rs.AddSingular("mies", "my") + rs.AddSingular("nies", "ny") + rs.AddSingular("pies", "py") + rs.AddSingular("qies", "qy") + rs.AddSingular("ries", "ry") + rs.AddSingular("sies", "sy") + rs.AddSingular("ties", "ty") + // rs.AddSingular("vies", "vy") + rs.AddSingular("wies", "wy") + rs.AddSingular("xies", "xy") + rs.AddSingular("zies", "zy") + rs.AddSingular("series", "series") + rs.AddSingular("xes", "x") + rs.AddSingular("ches", "ch") + rs.AddSingular("sses", "ss") + rs.AddSingular("shes", "sh") + rs.AddSingular("mice", "mouse") + rs.AddSingular("lice", "louse") + rs.AddSingular("buses", "bus") + rs.AddSingularExact("bus", "bus", true) + rs.AddSingular("oes", "o") + rs.AddSingular("shoes", "shoe") + rs.AddSingular("crises", "crisis") + rs.AddSingularExact("crisis", "crisis", true) + rs.AddSingular("axes", "axis") + rs.AddSingularExact("axis", "axis", true) + rs.AddSingular("testes", "testis") + rs.AddSingularExact("testis", "testis", true) + rs.AddSingular("octopi", "octopus") + rs.AddSingularExact("octopus", "octopus", true) + rs.AddSingular("viri", "virus") + rs.AddSingularExact("virus", "virus", true) + rs.AddSingular("statuses", "status") + rs.AddSingular("Statuses", "Status") + rs.AddSingular("campuses", "campus") + rs.AddSingularExact("status", "status", true) + rs.AddSingularExact("Status", "Status", true) + rs.AddSingularExact("campus", "campus", true) + rs.AddSingular("aliases", "alias") + rs.AddSingularExact("alias", "alias", true) + rs.AddSingularExact("oxen", "ox", true) + rs.AddSingular("vertices", "vertex") + rs.AddSingular("indices", "index") + rs.AddSingular("matrices", "matrix") + rs.AddSingularExact("quizzes", "quiz", true) + rs.AddSingular("databases", "database") + rs.AddSingular("resses", "ress") + rs.AddSingular("ress", "ress") + rs.AddIrregular("person", "people") + rs.AddIrregular("man", "men") + 
rs.AddIrregular("child", "children") + rs.AddIrregular("sex", "sexes") + rs.AddIrregular("move", "moves") + rs.AddIrregular("zombie", "zombies") + rs.AddIrregular("Status", "Statuses") + rs.AddIrregular("status", "statuses") + rs.AddIrregular("campus", "campuses") + rs.AddIrregular("human", "humans") + rs.AddUncountable("equipment") + rs.AddUncountable("information") + rs.AddUncountable("rice") + rs.AddUncountable("money") + rs.AddUncountable("species") + rs.AddUncountable("series") + rs.AddUncountable("fish") + rs.AddUncountable("sheep") + rs.AddUncountable("jeans") + rs.AddUncountable("police") + + acronyms := strings.Split(baseAcronyms, ",") + for _, acr := range acronyms { + rs.AddAcronym(acr) + } + + return rs +} + +// Uncountables returns a map of uncountables in the ruleset +func (rs *Ruleset) Uncountables() map[string]bool { + return rs.uncountables +} + +// AddPlural add a pluralization rule +func (rs *Ruleset) AddPlural(suffix, replacement string) { + rs.AddPluralExact(suffix, replacement, false) +} + +// AddPluralExact add a pluralization rule with full string match +func (rs *Ruleset) AddPluralExact(suffix, replacement string, exact bool) { + // remove uncountable + delete(rs.uncountables, suffix) + // create rule + r := new(Rule) + r.suffix = suffix + r.replacement = replacement + r.exact = exact + // prepend + rs.plurals = append([]*Rule{r}, rs.plurals...) +} + +// AddSingular add a singular rule +func (rs *Ruleset) AddSingular(suffix, replacement string) { + rs.AddSingularExact(suffix, replacement, false) +} + +// AddSingularExact same as AddSingular but you can set `exact` to force +// a full string match +func (rs *Ruleset) AddSingularExact(suffix, replacement string, exact bool) { + // remove from uncountable + delete(rs.uncountables, suffix) + // create rule + r := new(Rule) + r.suffix = suffix + r.replacement = replacement + r.exact = exact + rs.singulars = append([]*Rule{r}, rs.singulars...) +} + +// AddHuman Human rules are applied by humanize to show more friendly +// versions of words +func (rs *Ruleset) AddHuman(suffix, replacement string) { + r := new(Rule) + r.suffix = suffix + r.replacement = replacement + rs.humans = append([]*Rule{r}, rs.humans...) +} + +// AddIrregular Add any inconsistent pluralizing/singularizing rules +// to the set here. +func (rs *Ruleset) AddIrregular(singular, plural string) { + delete(rs.uncountables, singular) + delete(rs.uncountables, plural) + rs.AddPlural(singular, plural) + rs.AddPlural(plural, plural) + rs.AddSingular(plural, singular) +} + +// AddAcronym if you use acronym you may need to add them to the ruleset +// to prevent Underscored words of things like "HTML" coming out +// as "h_t_m_l" +func (rs *Ruleset) AddAcronym(word string) { + r := new(Rule) + r.suffix = word + r.replacement = rs.Titleize(strings.ToLower(word)) + rs.acronyms = append(rs.acronyms, r) +} + +// AddUncountable add a word to this ruleset that has the same singular and plural form +// for example: "rice" +func (rs *Ruleset) AddUncountable(word string) { + rs.uncountables[strings.ToLower(word)] = true +} + +func (rs *Ruleset) isUncountable(word string) bool { + // handle multiple words by using the last one + words := strings.Split(word, " ") + if _, exists := rs.uncountables[strings.ToLower(words[len(words)-1])]; exists { + return true + } + return false +} + +//isAcronym returns if a word is acronym or not. 
+func (rs *Ruleset) isAcronym(word string) bool {
+	for _, rule := range rs.acronyms {
+		if strings.ToUpper(rule.suffix) == strings.ToUpper(word) {
+			return true
+		}
+	}
+
+	return false
+}
+
+//PluralizeWithSize pluralizes a word, taking the given count into account
+func (rs *Ruleset) PluralizeWithSize(word string, size int) string {
+	if size == 1 {
+		return rs.Singularize(word)
+	}
+	return rs.Pluralize(word)
+}
+
+// Pluralize returns the plural form of a singular word
+func (rs *Ruleset) Pluralize(word string) string {
+	if len(word) == 0 {
+		return word
+	}
+	lWord := strings.ToLower(word)
+	if rs.isUncountable(lWord) {
+		return word
+	}
+
+	var candidate string
+	for _, rule := range rs.plurals {
+		if rule.exact {
+			if lWord == rule.suffix {
+				// Capitalized word
+				if lWord[0] != word[0] && lWord[1:] == word[1:] {
+					return rs.Capitalize(rule.replacement)
+				}
+				return rule.replacement
+			}
+			continue
+		}
+
+		if strings.EqualFold(word, rule.suffix) {
+			candidate = rule.replacement
+		}
+
+		if strings.HasSuffix(word, rule.suffix) {
+			return replaceLast(word, rule.suffix, rule.replacement)
+		}
+	}
+
+	if candidate != "" {
+		return candidate
+	}
+	return word + "s"
+}
+
+//Singularize returns the singular form of a plural word
+func (rs *Ruleset) Singularize(word string) string {
+	if len(word) <= 1 {
+		return word
+	}
+	lWord := strings.ToLower(word)
+	if rs.isUncountable(lWord) {
+		return word
+	}
+
+	var candidate string
+
+	for _, rule := range rs.singulars {
+		if rule.exact {
+			if lWord == rule.suffix {
+				// Capitalized word
+				if lWord[0] != word[0] && lWord[1:] == word[1:] {
+					return rs.Capitalize(rule.replacement)
+				}
+				return rule.replacement
+			}
+			continue
+		}
+
+		if strings.EqualFold(word, rule.suffix) {
+			candidate = rule.replacement
+		}
+
+		if strings.HasSuffix(word, rule.suffix) {
+			return replaceLast(word, rule.suffix, rule.replacement)
+		}
+	}
+
+	if candidate != "" {
+		return candidate
+	}
+
+	return word
+}
+
+//Capitalize uppercases the first character of the word
+func (rs *Ruleset) Capitalize(word string) string {
+	if rs.isAcronym(word) {
+		return strings.ToUpper(word)
+	}
+	return strings.ToUpper(word[:1]) + word[1:]
+}
+
+//Camelize "dino_party" -> "DinoParty"
+func (rs *Ruleset) Camelize(word string) string {
+	if rs.isAcronym(word) {
+		return strings.ToUpper(word)
+	}
+	words := splitAtCaseChangeWithTitlecase(word)
+	return strings.Join(words, "")
+}
+
+//CamelizeDownFirst same as Camelize but with the first letter downcased
+func (rs *Ruleset) CamelizeDownFirst(word string) string {
+	word = rs.Camelize(word)
+	return strings.ToLower(word[:1]) + word[1:]
+}
+
+//Titleize capitalizes every word in a sentence "hello there" -> "Hello There"
+func (rs *Ruleset) Titleize(word string) string {
+	words := splitAtCaseChangeWithTitlecase(word)
+	result := strings.Join(words, " ")
+
+	var acronymWords []string
+	for index, word := range words {
+		if len(word) == 1 {
+			acronymWords = append(acronymWords, word)
+		}
+
+		if len(word) > 1 || index == len(words)-1 || len(acronymWords) > 1 {
+			acronym := strings.Join(acronymWords, "")
+			if !rs.isAcronym(acronym) {
+				acronymWords = acronymWords[:len(acronymWords)]
+				continue
+			}
+
+			result = strings.Replace(result, strings.Join(acronymWords, " "), acronym, 1)
+			acronymWords = []string{}
+		}
+	}
+
+	return result
+}
+
+func (rs *Ruleset) safeCaseAcronyms(word string) string {
+	// convert an acronym like HTML into Html
+	for _, rule := range rs.acronyms {
+		word = strings.Replace(word, rule.suffix, rule.replacement, -1)
+	}
+	return word
+}
+
+func (rs *Ruleset) separatedWords(word, sep string) string {
+	word = rs.safeCaseAcronyms(word)
+	words := splitAtCaseChange(word)
+	return strings.Join(words, sep)
+}
+
+//Underscore lowercase underscore version "BigBen" -> "big_ben"
+func (rs *Ruleset) Underscore(word string) string {
+	return rs.separatedWords(word, "_")
+}
+
+//Humanize returns the word as a sentence with its first letter capitalized.
+// Uses custom friendly replacements via AddHuman()
+func (rs *Ruleset) Humanize(word string) string {
+	word = replaceLast(word, "_id", "") // strip foreign key kinds
+	// apply any replacements in the humans list
+	for _, rule := range rs.humans {
+		word = strings.Replace(word, rule.suffix, rule.replacement, -1)
+	}
+	sentence := rs.separatedWords(word, " ")
+
+	r, n := utf8.DecodeRuneInString(sentence)
+	return string(unicode.ToUpper(r)) + sentence[n:]
+}
+
+//ForeignKey an underscored foreign key name "Person" -> "person_id"
+func (rs *Ruleset) ForeignKey(word string) string {
+	return rs.Underscore(rs.Singularize(word)) + "_id"
+}
+
+//ForeignKeyCondensed a foreign key (without an underscore) "Person" -> "personid"
+func (rs *Ruleset) ForeignKeyCondensed(word string) string {
+	return rs.Underscore(word) + "id"
+}
+
+//Tableize Rails style pluralized table names: "SuperPerson" -> "super_people"
+func (rs *Ruleset) Tableize(word string) string {
+	return rs.Pluralize(rs.Underscore(rs.Typeify(word)))
+}
+
+var notUrlSafe = regexp.MustCompile(`[^\w\d\-_ ]`)
+
+//Parameterize param safe dasherized names like "my-param"
+func (rs *Ruleset) Parameterize(word string) string {
+	return rs.ParameterizeJoin(word, "-")
+}
+
+//ParameterizeJoin param safe dasherized names with custom separator
+func (rs *Ruleset) ParameterizeJoin(word, sep string) string {
+	word = strings.ToLower(word)
+	word = rs.Asciify(word)
+	word = notUrlSafe.ReplaceAllString(word, "")
+	word = strings.Replace(word, " ", sep, -1)
+	if len(sep) > 0 {
+		squash, err := regexp.Compile(sep + "+")
+		if err == nil {
+			word = squash.ReplaceAllString(word, sep)
+		}
+	}
+	word = strings.Trim(word, sep+" ")
+	return word
+}
+
+var lookalikes = map[string]*regexp.Regexp{
+	"A":  regexp.MustCompile(`À|Á|Â|Ã|Ä|Å`),
+	"AE": regexp.MustCompile(`Æ`),
+	"C":  regexp.MustCompile(`Ç`),
+	"E":  regexp.MustCompile(`È|É|Ê|Ë`),
+	"G":  regexp.MustCompile(`Ğ`),
+	"I":  regexp.MustCompile(`Ì|Í|Î|Ï|İ`),
+	"N":  regexp.MustCompile(`Ñ`),
+	"O":  regexp.MustCompile(`Ò|Ó|Ô|Õ|Ö|Ø`),
+	"S":  regexp.MustCompile(`Ş`),
+	"U":  regexp.MustCompile(`Ù|Ú|Û|Ü`),
+	"Y":  regexp.MustCompile(`Ý`),
+	"ss": regexp.MustCompile(`ß`),
+	"a":  regexp.MustCompile(`à|á|â|ã|ä|å`),
+	"ae": regexp.MustCompile(`æ`),
+	"c":  regexp.MustCompile(`ç`),
+	"e":  regexp.MustCompile(`è|é|ê|ë`),
+	"g":  regexp.MustCompile(`ğ`),
+	"i":  regexp.MustCompile(`ì|í|î|ï|ı`),
+	"n":  regexp.MustCompile(`ñ`),
+	"o":  regexp.MustCompile(`ò|ó|ô|õ|ö|ø`),
+	"s":  regexp.MustCompile(`ş`),
+	"u":  regexp.MustCompile(`ù|ú|û|ü|ũ|ū|ŭ|ů|ű|ų`),
+	"y":  regexp.MustCompile(`ý|ÿ`),
+}
+
+//Asciify transforms Latin characters like é -> e
+func (rs *Ruleset) Asciify(word string) string {
+	for repl, regex := range lookalikes {
+		word = regex.ReplaceAllString(word, repl)
+	}
+	return word
+}
+
+var tablePrefix = regexp.MustCompile(`^[^.]*\.`)
+
+//Typeify "something_like_this" -> "SomethingLikeThis"
+func (rs *Ruleset) Typeify(word string) string {
+	word = tablePrefix.ReplaceAllString(word, "")
+	return rs.Camelize(rs.Singularize(word))
+}
+
+//Dasherize "SomeText" -> "some-text"
+func (rs *Ruleset) Dasherize(word string) string {
+	return rs.separatedWords(word, "-")
+}
rs.separatedWords(word, "-") +} + +//Ordinalize "1031" -> "1031st" +func (rs *Ruleset) Ordinalize(str string) string { + number, err := strconv.Atoi(str) + if err != nil { + return str + } + switch abs(number) % 100 { + case 11, 12, 13: + return fmt.Sprintf("%dth", number) + default: + switch abs(number) % 10 { + case 1: + return fmt.Sprintf("%dst", number) + case 2: + return fmt.Sprintf("%dnd", number) + case 3: + return fmt.Sprintf("%drd", number) + } + } + return fmt.Sprintf("%dth", number) +} + +//ForeignKeyToAttribute returns the attribute name from the foreign key +func (rs *Ruleset) ForeignKeyToAttribute(str string) string { + w := rs.Camelize(str) + if strings.HasSuffix(w, "Id") { + return strings.TrimSuffix(w, "Id") + "ID" + } + return w +} + +//LoadReader loads rules from io.Reader param +func (rs *Ruleset) LoadReader(r io.Reader) error { + m := map[string]string{} + err := json.NewDecoder(r).Decode(&m) + if err != nil { + return fmt.Errorf("could not decode inflection JSON from reader: %s", err) + } + for s, p := range m { + defaultRuleset.AddIrregular(s, p) + } + return nil +} + +///////////////////////////////////////// +// the default global ruleset +////////////////////////////////////////// + +var defaultRuleset *Ruleset + +//LoadReader loads rules from io.Reader param +func LoadReader(r io.Reader) error { + return defaultRuleset.LoadReader(r) +} + +func init() { + defaultRuleset = NewDefaultRuleset() + + pwd, _ := os.Getwd() + cfg := filepath.Join(pwd, "inflections.json") + if p := os.Getenv("INFLECT_PATH"); p != "" { + cfg = p + } + if _, err := os.Stat(cfg); err == nil { + b, err := ioutil.ReadFile(cfg) + if err != nil { + fmt.Printf("could not read inflection file %s (%s)\n", cfg, err) + return + } + if err = defaultRuleset.LoadReader(bytes.NewReader(b)); err != nil { + fmt.Println(err) + } + } +} + +//Uncountables returns a list of uncountables rules +func Uncountables() map[string]bool { + return defaultRuleset.Uncountables() +} + +//AddPlural adds plural to the ruleset +func AddPlural(suffix, replacement string) { + defaultRuleset.AddPlural(suffix, replacement) +} + +//AddSingular adds singular to the ruleset +func AddSingular(suffix, replacement string) { + defaultRuleset.AddSingular(suffix, replacement) +} + +//AddHuman adds human +func AddHuman(suffix, replacement string) { + defaultRuleset.AddHuman(suffix, replacement) +} + +func AddIrregular(singular, plural string) { + defaultRuleset.AddIrregular(singular, plural) +} + +func AddAcronym(word string) { + defaultRuleset.AddAcronym(word) +} + +func AddUncountable(word string) { + defaultRuleset.AddUncountable(word) +} + +func Pluralize(word string) string { + return defaultRuleset.Pluralize(word) +} + +func PluralizeWithSize(word string, size int) string { + return defaultRuleset.PluralizeWithSize(word, size) +} + +func Singularize(word string) string { + return defaultRuleset.Singularize(word) +} + +func Capitalize(word string) string { + return defaultRuleset.Capitalize(word) +} + +func Camelize(word string) string { + return defaultRuleset.Camelize(word) +} + +func CamelizeDownFirst(word string) string { + return defaultRuleset.CamelizeDownFirst(word) +} + +func Titleize(word string) string { + return defaultRuleset.Titleize(word) +} + +func Underscore(word string) string { + return defaultRuleset.Underscore(word) +} + +func Humanize(word string) string { + return defaultRuleset.Humanize(word) +} + +func ForeignKey(word string) string { + return defaultRuleset.ForeignKey(word) +} + +func ForeignKeyCondensed(word 
string) string { + return defaultRuleset.ForeignKeyCondensed(word) +} + +func Tableize(word string) string { + return defaultRuleset.Tableize(word) +} + +func Parameterize(word string) string { + return defaultRuleset.Parameterize(word) +} + +func ParameterizeJoin(word, sep string) string { + return defaultRuleset.ParameterizeJoin(word, sep) +} + +func Typeify(word string) string { + return defaultRuleset.Typeify(word) +} + +func Dasherize(word string) string { + return defaultRuleset.Dasherize(word) +} + +func Ordinalize(word string) string { + return defaultRuleset.Ordinalize(word) +} + +func Asciify(word string) string { + return defaultRuleset.Asciify(word) +} + +func ForeignKeyToAttribute(word string) string { + return defaultRuleset.ForeignKeyToAttribute(word) +} + +// helper funcs + +func reverse(s string) string { + o := make([]rune, utf8.RuneCountInString(s)) + i := len(o) + for _, c := range s { + i-- + o[i] = c + } + return string(o) +} + +func isSpacerChar(c rune) bool { + switch { + case c == rune("_"[0]): + return true + case c == rune(" "[0]): + return true + case c == rune(":"[0]): + return true + case c == rune("-"[0]): + return true + } + return false +} + +func splitAtCaseChange(s string) []string { + words := make([]string, 0) + word := make([]rune, 0) + for _, c := range s { + spacer := isSpacerChar(c) + if len(word) > 0 { + if unicode.IsUpper(c) || spacer { + words = append(words, string(word)) + word = make([]rune, 0) + } + } + if !spacer { + word = append(word, unicode.ToLower(c)) + } + } + words = append(words, string(word)) + return words +} + +func splitAtCaseChangeWithTitlecase(s string) []string { + words := make([]string, 0) + word := make([]rune, 0) + + for _, c := range s { + spacer := isSpacerChar(c) + if len(word) > 0 { + if unicode.IsUpper(c) || spacer { + words = append(words, string(word)) + word = make([]rune, 0) + } + } + if !spacer { + if len(word) > 0 { + word = append(word, unicode.ToLower(c)) + } else { + word = append(word, unicode.ToUpper(c)) + } + } + } + + words = append(words, string(word)) + return words +} + +func replaceLast(s, match, repl string) string { + // reverse strings + srev := reverse(s) + mrev := reverse(match) + rrev := reverse(repl) + // match first and reverse back + return reverse(strings.Replace(srev, mrev, rrev, 1)) +} + +func abs(x int) int { + if x < 0 { + return -x + } + return x +} diff --git a/vendor/github.com/markbates/inflect/name.go b/vendor/github.com/markbates/inflect/name.go new file mode 100644 index 0000000000000000000000000000000000000000..e6863e28a6847bff404731fed2dc6bc3cdffb73d --- /dev/null +++ b/vendor/github.com/markbates/inflect/name.go @@ -0,0 +1,163 @@ +package inflect + +import ( + "fmt" + "path/filepath" + "strings" + + "github.com/gobuffalo/envy" +) + +// Name is a string that represents the "name" of a thing, like an app, model, etc... +type Name string + +// Title version of a name. ie. "foo_bar" => "Foo Bar" +func (n Name) Title() string { + x := strings.Split(string(n), "/") + for i, s := range x { + x[i] = Titleize(s) + } + + return strings.Join(x, " ") +} + +// Underscore version of a name. ie. 
"FooBar" => "foo_bar" +func (n Name) Underscore() string { + w := string(n) + if strings.ToUpper(w) == w { + return strings.ToLower(w) + } + return Underscore(w) +} + +// Plural version of a name +func (n Name) Plural() string { + return Pluralize(string(n)) +} + +// Singular version of a name +func (n Name) Singular() string { + return Singularize(string(n)) +} + +// Camel version of a name +func (n Name) Camel() string { + c := Camelize(string(n)) + if strings.HasSuffix(c, "Id") { + c = strings.TrimSuffix(c, "Id") + c += "ID" + } + return c +} + +// Model version of a name. ie. "user" => "User" +func (n Name) Model() string { + x := strings.Split(string(n), "/") + for i, s := range x { + x[i] = Camelize(Singularize(s)) + } + + return strings.Join(x, "") +} + +// Resource version of a name +func (n Name) Resource() string { + name := n.Underscore() + x := strings.FieldsFunc(name, func(r rune) bool { + return r == '_' || r == '/' + }) + + for i, w := range x { + if i == len(x)-1 { + x[i] = Camelize(Pluralize(strings.ToLower(w))) + continue + } + + x[i] = Camelize(w) + } + + return strings.Join(x, "") +} + +// ModelPlural version of a name. ie. "user" => "Users" +func (n Name) ModelPlural() string { + return Camelize(Pluralize(n.Model())) +} + +// File version of a name +func (n Name) File() string { + return Underscore(Camelize(string(n))) +} + +// Table version of a name +func (n Name) Table() string { + return Underscore(Pluralize(string(n))) +} + +// UnderSingular version of a name +func (n Name) UnderSingular() string { + return Underscore(Singularize(string(n))) +} + +// PluralCamel version of a name +func (n Name) PluralCamel() string { + return Pluralize(Camelize(string(n))) +} + +// PluralUnder version of a name +func (n Name) PluralUnder() string { + return Pluralize(Underscore(string(n))) +} + +// URL version of a name +func (n Name) URL() string { + return n.PluralUnder() +} + +// CamelSingular version of a name +func (n Name) CamelSingular() string { + return Camelize(Singularize(string(n))) +} + +// VarCaseSingular version of a name. ie. "FooBar" => "fooBar" +func (n Name) VarCaseSingular() string { + return CamelizeDownFirst(Singularize(Underscore(n.Resource()))) +} + +// VarCasePlural version of a name. ie. "FooBar" => "fooBar" +func (n Name) VarCasePlural() string { + return CamelizeDownFirst(n.Resource()) +} + +// Lower case version of a string +func (n Name) Lower() string { + return strings.ToLower(string(n)) +} + +// ParamID returns foo_bar_id +func (n Name) ParamID() string { + return fmt.Sprintf("%s_id", strings.Replace(n.UnderSingular(), "/", "_", -1)) +} + +// Package returns go package +func (n Name) Package() string { + key := string(n) + + for _, gp := range envy.GoPaths() { + key = strings.TrimPrefix(key, filepath.Join(gp, "src")) + key = strings.TrimPrefix(key, gp) + } + key = strings.TrimPrefix(key, string(filepath.Separator)) + + key = strings.Replace(key, "\\", "/", -1) + return key +} + +// Char returns first character in lower case, this is useful for methods inside a struct. 
+func (n Name) Char() string { + return strings.ToLower(string(n[0])) +} + +func (n Name) String() string { + return string(n) +} diff --git a/vendor/github.com/markbates/inflect/version.go b/vendor/github.com/markbates/inflect/version.go new file mode 100644 index 0000000000000000000000000000000000000000..a1674498419ae0031956f44ce0f46550c9c46ba6 --- /dev/null +++ b/vendor/github.com/markbates/inflect/version.go @@ -0,0 +1,3 @@ +package inflect + +const Version = "v1.0.4" diff --git a/vendor/github.com/openshift/cluster-api/pkg/apis/machine/v1beta1/machine_types.go b/vendor/github.com/openshift/cluster-api/pkg/apis/machine/v1beta1/machine_types.go index e7fbb4b7fe13a08905c1a4769db084cf9473f765..fa144a1f629b5e7901424816393b82868d0a7fe5 100644 --- a/vendor/github.com/openshift/cluster-api/pkg/apis/machine/v1beta1/machine_types.go +++ b/vendor/github.com/openshift/cluster-api/pkg/apis/machine/v1beta1/machine_types.go @@ -39,6 +39,12 @@ const ( // Machine is the Schema for the machines API // +k8s:openapi-gen=true // +kubebuilder:subresource:status +// +kubebuilder:printcolumn:name="Instance",type="string",JSONPath=".status.providerStatus.instanceId",description="Instance ID of machine created in AWS" +// +kubebuilder:printcolumn:name="State",type="string",JSONPath=".status.providerStatus.instanceState",description="State of the AWS instance" +// +kubebuilder:printcolumn:name="Type",type="string",JSONPath=".spec.providerSpec.value.instanceType",description="Type of instance" +// +kubebuilder:printcolumn:name="Region",type="string",JSONPath=".spec.providerSpec.value.placement.region",description="Region associated with machine" +// +kubebuilder:printcolumn:name="Zone",type="string",JSONPath=".spec.providerSpec.value.placement.availabilityZone",description="Zone associated with machine" +// +kubebuilder:printcolumn:name="Age",type="date",JSONPath=".metadata.creationTimestamp",description="Machine age" type Machine struct { metav1.TypeMeta `json:",inline"` metav1.ObjectMeta `json:"metadata,omitempty"` diff --git a/vendor/github.com/openshift/cluster-api/pkg/apis/machine/v1beta1/machineset_types.go b/vendor/github.com/openshift/cluster-api/pkg/apis/machine/v1beta1/machineset_types.go index ee5ed9855bb5f29f2510b099f5404532a8e88873..11fbe43c7cb41157095020d00eaa18179a0bcc3c 100644 --- a/vendor/github.com/openshift/cluster-api/pkg/apis/machine/v1beta1/machineset_types.go +++ b/vendor/github.com/openshift/cluster-api/pkg/apis/machine/v1beta1/machineset_types.go @@ -35,6 +35,11 @@ import ( // +k8s:openapi-gen=true // +kubebuilder:subresource:status // +kubebuilder:subresource:scale:specpath=.spec.replicas,statuspath=.status.replicas,selectorpath=.status.labelSelector +// +kubebuilder:printcolumn:name="Desired",type="integer",JSONPath=".spec.replicas",description="Desired Replicas" +// +kubebuilder:printcolumn:name="Current",type="integer",JSONPath=".status.replicas",description="Current Replicas" +// +kubebuilder:printcolumn:name="Ready",type="integer",JSONPath=".status.readyReplicas",description="Ready Replicas" +// +kubebuilder:printcolumn:name="Available",type="string",JSONPath=".status.availableReplicas",description="Observed number of available replicas" +// +kubebuilder:printcolumn:name="Age",type="date",JSONPath=".metadata.creationTimestamp",description="Machineset age" type MachineSet struct { metav1.TypeMeta `json:",inline"` metav1.ObjectMeta `json:"metadata,omitempty"` diff --git a/vendor/github.com/rogpeppe/go-internal/LICENSE b/vendor/github.com/rogpeppe/go-internal/LICENSE new file mode 
100644 index 0000000000000000000000000000000000000000..49ea0f928825ac4339299665088d332ac9953476 --- /dev/null +++ b/vendor/github.com/rogpeppe/go-internal/LICENSE @@ -0,0 +1,27 @@ +Copyright (c) 2018 The Go Authors. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + * Neither the name of Google Inc. nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/vendor/github.com/rogpeppe/go-internal/modfile/gopkgin.go b/vendor/github.com/rogpeppe/go-internal/modfile/gopkgin.go new file mode 100644 index 0000000000000000000000000000000000000000..c94b3848a0e163a4d4220f0f39e00a390cd38c23 --- /dev/null +++ b/vendor/github.com/rogpeppe/go-internal/modfile/gopkgin.go @@ -0,0 +1,47 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// TODO: Figure out what gopkg.in should do. + +package modfile + +import "strings" + +// ParseGopkgIn splits gopkg.in import paths into their constituent parts +func ParseGopkgIn(path string) (root, repo, major, subdir string, ok bool) { + if !strings.HasPrefix(path, "gopkg.in/") { + return + } + f := strings.Split(path, "/") + if len(f) >= 2 { + if elem, v, ok := dotV(f[1]); ok { + root = strings.Join(f[:2], "/") + repo = "github.com/go-" + elem + "/" + elem + major = v + subdir = strings.Join(f[2:], "/") + return root, repo, major, subdir, true + } + } + if len(f) >= 3 { + if elem, v, ok := dotV(f[2]); ok { + root = strings.Join(f[:3], "/") + repo = "github.com/" + f[1] + "/" + elem + major = v + subdir = strings.Join(f[3:], "/") + return root, repo, major, subdir, true + } + } + return +} + +func dotV(name string) (elem, v string, ok bool) { + i := len(name) - 1 + for i >= 0 && '0' <= name[i] && name[i] <= '9' { + i-- + } + if i <= 2 || i+1 >= len(name) || name[i-1] != '.' 
|| name[i] != 'v' || name[i+1] == '0' && len(name) != i+2 { + return "", "", false + } + return name[:i-1], name[i:], true +} diff --git a/vendor/github.com/rogpeppe/go-internal/modfile/print.go b/vendor/github.com/rogpeppe/go-internal/modfile/print.go new file mode 100644 index 0000000000000000000000000000000000000000..7b1dd8f9533804a7e7fe3176eda959430d51b99a --- /dev/null +++ b/vendor/github.com/rogpeppe/go-internal/modfile/print.go @@ -0,0 +1,164 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package modfile implements parsing and formatting for +// go.mod files. +package modfile + +import ( + "bytes" + "fmt" + "strings" +) + +func Format(f *FileSyntax) []byte { + pr := &printer{} + pr.file(f) + return pr.Bytes() +} + +// A printer collects the state during printing of a file or expression. +type printer struct { + bytes.Buffer // output buffer + comment []Comment // pending end-of-line comments + margin int // left margin (indent), a number of tabs +} + +// printf prints to the buffer. +func (p *printer) printf(format string, args ...interface{}) { + fmt.Fprintf(p, format, args...) +} + +// indent returns the position on the current line, in bytes, 0-indexed. +func (p *printer) indent() int { + b := p.Bytes() + n := 0 + for n < len(b) && b[len(b)-1-n] != '\n' { + n++ + } + return n +} + +// newline ends the current line, flushing end-of-line comments. +func (p *printer) newline() { + if len(p.comment) > 0 { + p.printf(" ") + for i, com := range p.comment { + if i > 0 { + p.trim() + p.printf("\n") + for i := 0; i < p.margin; i++ { + p.printf("\t") + } + } + p.printf("%s", strings.TrimSpace(com.Token)) + } + p.comment = p.comment[:0] + } + + p.trim() + p.printf("\n") + for i := 0; i < p.margin; i++ { + p.printf("\t") + } +} + +// trim removes trailing spaces and tabs from the current line. +func (p *printer) trim() { + // Remove trailing spaces and tabs from line we're about to end. + b := p.Bytes() + n := len(b) + for n > 0 && (b[n-1] == '\t' || b[n-1] == ' ') { + n-- + } + p.Truncate(n) +} + +// file formats the given file into the print buffer. +func (p *printer) file(f *FileSyntax) { + for _, com := range f.Before { + p.printf("%s", strings.TrimSpace(com.Token)) + p.newline() + } + + for i, stmt := range f.Stmt { + switch x := stmt.(type) { + case *CommentBlock: + // comments already handled + p.expr(x) + + default: + p.expr(x) + p.newline() + } + + for _, com := range stmt.Comment().After { + p.printf("%s", strings.TrimSpace(com.Token)) + p.newline() + } + + if i+1 < len(f.Stmt) { + p.newline() + } + } +} + +func (p *printer) expr(x Expr) { + // Emit line-comments preceding this expression. + if before := x.Comment().Before; len(before) > 0 { + // Want to print a line comment. + // Line comments must be at the current margin. + p.trim() + if p.indent() > 0 { + // There's other text on the line. Start a new line. + p.printf("\n") + } + // Re-indent to margin. 
+ for i := 0; i < p.margin; i++ { + p.printf("\t") + } + for _, com := range before { + p.printf("%s", strings.TrimSpace(com.Token)) + p.newline() + } + } + + switch x := x.(type) { + default: + panic(fmt.Errorf("printer: unexpected type %T", x)) + + case *CommentBlock: + // done + + case *LParen: + p.printf("(") + case *RParen: + p.printf(")") + + case *Line: + sep := "" + for _, tok := range x.Token { + p.printf("%s%s", sep, tok) + sep = " " + } + + case *LineBlock: + for _, tok := range x.Token { + p.printf("%s ", tok) + } + p.expr(&x.LParen) + p.margin++ + for _, l := range x.Line { + p.newline() + p.expr(l) + } + p.margin-- + p.newline() + p.expr(&x.RParen) + } + + // Queue end-of-line comments for printing when we + // reach the end of the line. + p.comment = append(p.comment, x.Comment().Suffix...) +} diff --git a/vendor/github.com/rogpeppe/go-internal/modfile/read.go b/vendor/github.com/rogpeppe/go-internal/modfile/read.go new file mode 100644 index 0000000000000000000000000000000000000000..1d81ff1ab7a326061f9352bbfe485533b6f9a5f5 --- /dev/null +++ b/vendor/github.com/rogpeppe/go-internal/modfile/read.go @@ -0,0 +1,869 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Module file parser. +// This is a simplified copy of Google's buildifier parser. + +package modfile + +import ( + "bytes" + "fmt" + "os" + "strconv" + "strings" + "unicode" + "unicode/utf8" +) + +// A Position describes the position between two bytes of input. +type Position struct { + Line int // line in input (starting at 1) + LineRune int // rune in line (starting at 1) + Byte int // byte in input (starting at 0) +} + +// add returns the position at the end of s, assuming it starts at p. +func (p Position) add(s string) Position { + p.Byte += len(s) + if n := strings.Count(s, "\n"); n > 0 { + p.Line += n + s = s[strings.LastIndex(s, "\n")+1:] + p.LineRune = 1 + } + p.LineRune += utf8.RuneCountInString(s) + return p +} + +// An Expr represents an input element. +type Expr interface { + // Span returns the start and end position of the expression, + // excluding leading or trailing comments. + Span() (start, end Position) + + // Comment returns the comments attached to the expression. + // This method would normally be named 'Comments' but that + // would interfere with embedding a type of the same name. + Comment() *Comments +} + +// A Comment represents a single // comment. +type Comment struct { + Start Position + Token string // without trailing newline + Suffix bool // an end of line (not whole line) comment +} + +// Comments collects the comments associated with an expression. +type Comments struct { + Before []Comment // whole-line comments before this expression + Suffix []Comment // end-of-line comments after this expression + + // For top-level expressions only, After lists whole-line + // comments following the expression. + After []Comment +} + +// Comment returns the receiver. This isn't useful by itself, but +// a Comments struct is embedded into all the expression +// implementation types, and this gives each of those a Comment +// method to satisfy the Expr interface. +func (c *Comments) Comment() *Comments { + return c +} + +// A FileSyntax represents an entire go.mod file. 
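+// For example, parsing a file with a module line followed by a require block
+// yields a FileSyntax whose Stmt holds a *Line for the module statement and
+// a *LineBlock for the require block.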
+type FileSyntax struct { + Name string // file path + Comments + Stmt []Expr +} + +func (x *FileSyntax) Span() (start, end Position) { + if len(x.Stmt) == 0 { + return + } + start, _ = x.Stmt[0].Span() + _, end = x.Stmt[len(x.Stmt)-1].Span() + return start, end +} + +func (x *FileSyntax) addLine(hint Expr, tokens ...string) *Line { + if hint == nil { + // If no hint given, add to the last statement of the given type. + Loop: + for i := len(x.Stmt) - 1; i >= 0; i-- { + stmt := x.Stmt[i] + switch stmt := stmt.(type) { + case *Line: + if stmt.Token != nil && stmt.Token[0] == tokens[0] { + hint = stmt + break Loop + } + case *LineBlock: + if stmt.Token[0] == tokens[0] { + hint = stmt + break Loop + } + } + } + } + + if hint != nil { + for i, stmt := range x.Stmt { + switch stmt := stmt.(type) { + case *Line: + if stmt == hint { + // Convert line to line block. + stmt.InBlock = true + block := &LineBlock{Token: stmt.Token[:1], Line: []*Line{stmt}} + stmt.Token = stmt.Token[1:] + x.Stmt[i] = block + new := &Line{Token: tokens[1:], InBlock: true} + block.Line = append(block.Line, new) + return new + } + case *LineBlock: + if stmt == hint { + new := &Line{Token: tokens[1:], InBlock: true} + stmt.Line = append(stmt.Line, new) + return new + } + for j, line := range stmt.Line { + if line == hint { + // Add new line after hint. + stmt.Line = append(stmt.Line, nil) + copy(stmt.Line[j+2:], stmt.Line[j+1:]) + new := &Line{Token: tokens[1:], InBlock: true} + stmt.Line[j+1] = new + return new + } + } + } + } + } + + new := &Line{Token: tokens} + x.Stmt = append(x.Stmt, new) + return new +} + +func (x *FileSyntax) updateLine(line *Line, tokens ...string) { + if line.InBlock { + tokens = tokens[1:] + } + line.Token = tokens +} + +func (x *FileSyntax) removeLine(line *Line) { + line.Token = nil +} + +// Cleanup cleans up the file syntax x after any edit operations. +// To avoid quadratic behavior, removeLine marks the line as dead +// by setting line.Token = nil but does not remove it from the slice +// in which it appears. After edits have all been indicated, +// calling Cleanup cleans out the dead lines. +func (x *FileSyntax) Cleanup() { + w := 0 + for _, stmt := range x.Stmt { + switch stmt := stmt.(type) { + case *Line: + if stmt.Token == nil { + continue + } + case *LineBlock: + ww := 0 + for _, line := range stmt.Line { + if line.Token != nil { + stmt.Line[ww] = line + ww++ + } + } + if ww == 0 { + continue + } + if ww == 1 { + // Collapse block into single line. + line := &Line{ + Comments: Comments{ + Before: commentsAdd(stmt.Before, stmt.Line[0].Before), + Suffix: commentsAdd(stmt.Line[0].Suffix, stmt.Suffix), + After: commentsAdd(stmt.Line[0].After, stmt.After), + }, + Token: stringsAdd(stmt.Token, stmt.Line[0].Token), + } + x.Stmt[w] = line + w++ + continue + } + stmt.Line = stmt.Line[:ww] + } + x.Stmt[w] = stmt + w++ + } + x.Stmt = x.Stmt[:w] +} + +func commentsAdd(x, y []Comment) []Comment { + return append(x[:len(x):len(x)], y...) +} + +func stringsAdd(x, y []string) []string { + return append(x[:len(x):len(x)], y...) +} + +// A CommentBlock represents a top-level block of comments separate +// from any rule. +type CommentBlock struct { + Comments + Start Position +} + +func (x *CommentBlock) Span() (start, end Position) { + return x.Start, x.Start +} + +// A Line is a single line of tokens. 
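+// For example, a top-level `require example.com/m v1.0.0` parses to a Line
+// with Token ["require", "example.com/m", "v1.0.0"]; the same line inside a
+// require block parses with InBlock set and the verb omitted from Token.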
+type Line struct { + Comments + Start Position + Token []string + InBlock bool + End Position +} + +func (x *Line) Span() (start, end Position) { + return x.Start, x.End +} + +// A LineBlock is a factored block of lines, like +// +// require ( +// "x" +// "y" +// ) +// +type LineBlock struct { + Comments + Start Position + LParen LParen + Token []string + Line []*Line + RParen RParen +} + +func (x *LineBlock) Span() (start, end Position) { + return x.Start, x.RParen.Pos.add(")") +} + +// An LParen represents the beginning of a parenthesized line block. +// It is a place to store suffix comments. +type LParen struct { + Comments + Pos Position +} + +func (x *LParen) Span() (start, end Position) { + return x.Pos, x.Pos.add(")") +} + +// An RParen represents the end of a parenthesized line block. +// It is a place to store whole-line (before) comments. +type RParen struct { + Comments + Pos Position +} + +func (x *RParen) Span() (start, end Position) { + return x.Pos, x.Pos.add(")") +} + +// An input represents a single input file being parsed. +type input struct { + // Lexing state. + filename string // name of input file, for errors + complete []byte // entire input + remaining []byte // remaining input + token []byte // token being scanned + lastToken string // most recently returned token, for error messages + pos Position // current input position + comments []Comment // accumulated comments + endRule int // position of end of current rule + + // Parser state. + file *FileSyntax // returned top-level syntax tree + parseError error // error encountered during parsing + + // Comment assignment state. + pre []Expr // all expressions, in preorder traversal + post []Expr // all expressions, in postorder traversal +} + +func newInput(filename string, data []byte) *input { + return &input{ + filename: filename, + complete: data, + remaining: data, + pos: Position{Line: 1, LineRune: 1, Byte: 0}, + } +} + +// parse parses the input file. +func parse(file string, data []byte) (f *FileSyntax, err error) { + in := newInput(file, data) + // The parser panics for both routine errors like syntax errors + // and for programmer bugs like array index errors. + // Turn both into error returns. Catching bug panics is + // especially important when processing many files. + defer func() { + if e := recover(); e != nil { + if e == in.parseError { + err = in.parseError + } else { + err = fmt.Errorf("%s:%d:%d: internal error: %v", in.filename, in.pos.Line, in.pos.LineRune, e) + } + } + }() + + // Invoke the parser. + in.parseFile() + if in.parseError != nil { + return nil, in.parseError + } + in.file.Name = in.filename + + // Assign comments to nearby syntax. + in.assignComments() + + return in.file, nil +} + +// Error is called to report an error. +// The reason s is often "syntax error". +// Error does not return: it panics. +func (in *input) Error(s string) { + if s == "syntax error" && in.lastToken != "" { + s += " near " + in.lastToken + } + in.parseError = fmt.Errorf("%s:%d:%d: %v", in.filename, in.pos.Line, in.pos.LineRune, s) + panic(in.parseError) +} + +// eof reports whether the input has reached end of file. +func (in *input) eof() bool { + return len(in.remaining) == 0 +} + +// peekRune returns the next rune in the input without consuming it. +func (in *input) peekRune() int { + if len(in.remaining) == 0 { + return 0 + } + r, _ := utf8.DecodeRune(in.remaining) + return int(r) +} + +// peekPrefix reports whether the remaining input begins with the given prefix. 
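+// The lexer uses it below to detect the "//" and "/*" comment openers
+// without consuming any input.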
+func (in *input) peekPrefix(prefix string) bool { + // This is like bytes.HasPrefix(in.remaining, []byte(prefix)) + // but without the allocation of the []byte copy of prefix. + for i := 0; i < len(prefix); i++ { + if i >= len(in.remaining) || in.remaining[i] != prefix[i] { + return false + } + } + return true +} + +// readRune consumes and returns the next rune in the input. +func (in *input) readRune() int { + if len(in.remaining) == 0 { + in.Error("internal lexer error: readRune at EOF") + } + r, size := utf8.DecodeRune(in.remaining) + in.remaining = in.remaining[size:] + if r == '\n' { + in.pos.Line++ + in.pos.LineRune = 1 + } else { + in.pos.LineRune++ + } + in.pos.Byte += size + return int(r) +} + +type symType struct { + pos Position + endPos Position + text string +} + +// startToken marks the beginning of the next input token. +// It must be followed by a call to endToken, once the token has +// been consumed using readRune. +func (in *input) startToken(sym *symType) { + in.token = in.remaining + sym.text = "" + sym.pos = in.pos +} + +// endToken marks the end of an input token. +// It records the actual token string in sym.text if the caller +// has not done that already. +func (in *input) endToken(sym *symType) { + if sym.text == "" { + tok := string(in.token[:len(in.token)-len(in.remaining)]) + sym.text = tok + in.lastToken = sym.text + } + sym.endPos = in.pos +} + +// lex is called from the parser to obtain the next input token. +// It returns the token value (either a rune like '+' or a symbolic token _FOR) +// and sets val to the data associated with the token. +// For all our input tokens, the associated data is +// val.Pos (the position where the token begins) +// and val.Token (the input string corresponding to the token). +func (in *input) lex(sym *symType) int { + // Skip past spaces, stopping at non-space or EOF. + countNL := 0 // number of newlines we've skipped past + for !in.eof() { + // Skip over spaces. Count newlines so we can give the parser + // information about where top-level blank lines are, + // for top-level comment assignment. + c := in.peekRune() + if c == ' ' || c == '\t' || c == '\r' { + in.readRune() + continue + } + + // Comment runs to end of line. + if in.peekPrefix("//") { + in.startToken(sym) + + // Is this comment the only thing on its line? + // Find the last \n before this // and see if it's all + // spaces from there to here. + i := bytes.LastIndex(in.complete[:in.pos.Byte], []byte("\n")) + suffix := len(bytes.TrimSpace(in.complete[i+1:in.pos.Byte])) > 0 + in.readRune() + in.readRune() + + // Consume comment. + for len(in.remaining) > 0 && in.readRune() != '\n' { + } + in.endToken(sym) + + sym.text = strings.TrimRight(sym.text, "\n") + in.lastToken = "comment" + + // If we are at top level (not in a statement), hand the comment to + // the parser as a _COMMENT token. The grammar is written + // to handle top-level comments itself. + if !suffix { + // Not in a statement. Tell parser about top-level comment. + return _COMMENT + } + + // Otherwise, save comment for later attachment to syntax tree. + if countNL > 1 { + in.comments = append(in.comments, Comment{sym.pos, "", false}) + } + in.comments = append(in.comments, Comment{sym.pos, sym.text, suffix}) + countNL = 1 + return _EOL + } + + if in.peekPrefix("/*") { + in.Error(fmt.Sprintf("mod files must use // comments (not /* */ comments)")) + } + + // Found non-space non-comment. + break + } + + // Found the beginning of the next token. 
+ in.startToken(sym) + defer in.endToken(sym) + + // End of file. + if in.eof() { + in.lastToken = "EOF" + return _EOF + } + + // Punctuation tokens. + switch c := in.peekRune(); c { + case '\n': + in.readRune() + return c + + case '(': + in.readRune() + return c + + case ')': + in.readRune() + return c + + case '"', '`': // quoted string + quote := c + in.readRune() + for { + if in.eof() { + in.pos = sym.pos + in.Error("unexpected EOF in string") + } + if in.peekRune() == '\n' { + in.Error("unexpected newline in string") + } + c := in.readRune() + if c == quote { + break + } + if c == '\\' && quote != '`' { + if in.eof() { + in.pos = sym.pos + in.Error("unexpected EOF in string") + } + in.readRune() + } + } + in.endToken(sym) + return _STRING + } + + // Checked all punctuation. Must be identifier token. + if c := in.peekRune(); !isIdent(c) { + in.Error(fmt.Sprintf("unexpected input character %#q", c)) + } + + // Scan over identifier. + for isIdent(in.peekRune()) { + if in.peekPrefix("//") { + break + } + if in.peekPrefix("/*") { + in.Error(fmt.Sprintf("mod files must use // comments (not /* */ comments)")) + } + in.readRune() + } + return _IDENT +} + +// isIdent reports whether c is an identifier rune. +// We treat nearly all runes as identifier runes. +func isIdent(c int) bool { + return c != 0 && !unicode.IsSpace(rune(c)) +} + +// Comment assignment. +// We build two lists of all subexpressions, preorder and postorder. +// The preorder list is ordered by start location, with outer expressions first. +// The postorder list is ordered by end location, with outer expressions last. +// We use the preorder list to assign each whole-line comment to the syntax +// immediately following it, and we use the postorder list to assign each +// end-of-line comment to the syntax immediately preceding it. + +// order walks the expression adding it and its subexpressions to the +// preorder and postorder lists. +func (in *input) order(x Expr) { + if x != nil { + in.pre = append(in.pre, x) + } + switch x := x.(type) { + default: + panic(fmt.Errorf("order: unexpected type %T", x)) + case nil: + // nothing + case *LParen, *RParen: + // nothing + case *CommentBlock: + // nothing + case *Line: + // nothing + case *FileSyntax: + for _, stmt := range x.Stmt { + in.order(stmt) + } + case *LineBlock: + in.order(&x.LParen) + for _, l := range x.Line { + in.order(l) + } + in.order(&x.RParen) + } + if x != nil { + in.post = append(in.post, x) + } +} + +// assignComments attaches comments to nearby syntax. +func (in *input) assignComments() { + const debug = false + + // Generate preorder and postorder lists. + in.order(in.file) + + // Split into whole-line comments and suffix comments. + var line, suffix []Comment + for _, com := range in.comments { + if com.Suffix { + suffix = append(suffix, com) + } else { + line = append(line, com) + } + } + + if debug { + for _, c := range line { + fmt.Fprintf(os.Stderr, "LINE %q :%d:%d #%d\n", c.Token, c.Start.Line, c.Start.LineRune, c.Start.Byte) + } + } + + // Assign line comments to syntax immediately following. + for _, x := range in.pre { + start, _ := x.Span() + if debug { + fmt.Printf("pre %T :%d:%d #%d\n", x, start.Line, start.LineRune, start.Byte) + } + xcom := x.Comment() + for len(line) > 0 && start.Byte >= line[0].Start.Byte { + if debug { + fmt.Fprintf(os.Stderr, "ASSIGN LINE %q #%d\n", line[0].Token, line[0].Start.Byte) + } + xcom.Before = append(xcom.Before, line[0]) + line = line[1:] + } + } + + // Remaining line comments go at end of file. 
+ in.file.After = append(in.file.After, line...) + + if debug { + for _, c := range suffix { + fmt.Fprintf(os.Stderr, "SUFFIX %q :%d:%d #%d\n", c.Token, c.Start.Line, c.Start.LineRune, c.Start.Byte) + } + } + + // Assign suffix comments to syntax immediately before. + for i := len(in.post) - 1; i >= 0; i-- { + x := in.post[i] + + start, end := x.Span() + if debug { + fmt.Printf("post %T :%d:%d #%d :%d:%d #%d\n", x, start.Line, start.LineRune, start.Byte, end.Line, end.LineRune, end.Byte) + } + + // Do not assign suffix comments to end of line block or whole file. + // Instead assign them to the last element inside. + switch x.(type) { + case *FileSyntax: + continue + } + + // Do not assign suffix comments to something that starts + // on an earlier line, so that in + // + // x ( y + // z ) // comment + // + // we assign the comment to z and not to x ( ... ). + if start.Line != end.Line { + continue + } + xcom := x.Comment() + for len(suffix) > 0 && end.Byte <= suffix[len(suffix)-1].Start.Byte { + if debug { + fmt.Fprintf(os.Stderr, "ASSIGN SUFFIX %q #%d\n", suffix[len(suffix)-1].Token, suffix[len(suffix)-1].Start.Byte) + } + xcom.Suffix = append(xcom.Suffix, suffix[len(suffix)-1]) + suffix = suffix[:len(suffix)-1] + } + } + + // We assigned suffix comments in reverse. + // If multiple suffix comments were appended to the same + // expression node, they are now in reverse. Fix that. + for _, x := range in.post { + reverseComments(x.Comment().Suffix) + } + + // Remaining suffix comments go at beginning of file. + in.file.Before = append(in.file.Before, suffix...) +} + +// reverseComments reverses the []Comment list. +func reverseComments(list []Comment) { + for i, j := 0, len(list)-1; i < j; i, j = i+1, j-1 { + list[i], list[j] = list[j], list[i] + } +} + +func (in *input) parseFile() { + in.file = new(FileSyntax) + var sym symType + var cb *CommentBlock + for { + tok := in.lex(&sym) + switch tok { + case '\n': + if cb != nil { + in.file.Stmt = append(in.file.Stmt, cb) + cb = nil + } + case _COMMENT: + if cb == nil { + cb = &CommentBlock{Start: sym.pos} + } + com := cb.Comment() + com.Before = append(com.Before, Comment{Start: sym.pos, Token: sym.text}) + case _EOF: + if cb != nil { + in.file.Stmt = append(in.file.Stmt, cb) + } + return + default: + in.parseStmt(&sym) + if cb != nil { + in.file.Stmt[len(in.file.Stmt)-1].Comment().Before = cb.Before + cb = nil + } + } + } +} + +func (in *input) parseStmt(sym *symType) { + start := sym.pos + end := sym.endPos + token := []string{sym.text} + for { + tok := in.lex(sym) + switch tok { + case '\n', _EOF, _EOL: + in.file.Stmt = append(in.file.Stmt, &Line{ + Start: start, + Token: token, + End: end, + }) + return + case '(': + in.file.Stmt = append(in.file.Stmt, in.parseLineBlock(start, token, sym)) + return + default: + token = append(token, sym.text) + end = sym.endPos + } + } +} + +func (in *input) parseLineBlock(start Position, token []string, sym *symType) *LineBlock { + x := &LineBlock{ + Start: start, + Token: token, + LParen: LParen{Pos: sym.pos}, + } + var comments []Comment + for { + tok := in.lex(sym) + switch tok { + case _EOL: + // ignore + case '\n': + if len(comments) == 0 && len(x.Line) > 0 || len(comments) > 0 && comments[len(comments)-1].Token != "" { + comments = append(comments, Comment{}) + } + case _COMMENT: + comments = append(comments, Comment{Start: sym.pos, Token: sym.text}) + case _EOF: + in.Error(fmt.Sprintf("syntax error (unterminated block started at %s:%d:%d)", in.filename, x.Start.Line, x.Start.LineRune)) + case ')': 
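+			// Closing paren ends the block: attach any pending
+			// whole-line comments to the RParen and require an end
+			// of line (or EOF) immediately after it.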
+ x.RParen.Before = comments + x.RParen.Pos = sym.pos + tok = in.lex(sym) + if tok != '\n' && tok != _EOF && tok != _EOL { + in.Error("syntax error (expected newline after closing paren)") + } + return x + default: + l := in.parseLine(sym) + x.Line = append(x.Line, l) + l.Comment().Before = comments + comments = nil + } + } +} + +func (in *input) parseLine(sym *symType) *Line { + start := sym.pos + end := sym.endPos + token := []string{sym.text} + for { + tok := in.lex(sym) + switch tok { + case '\n', _EOF, _EOL: + return &Line{ + Start: start, + Token: token, + End: end, + InBlock: true, + } + default: + token = append(token, sym.text) + end = sym.endPos + } + } +} + +const ( + _EOF = -(1 + iota) + _EOL + _IDENT + _STRING + _COMMENT +) + +var ( + slashSlash = []byte("//") + moduleStr = []byte("module") +) + +// ModulePath returns the module path from the gomod file text. +// If it cannot find a module path, it returns an empty string. +// It is tolerant of unrelated problems in the go.mod file. +func ModulePath(mod []byte) string { + for len(mod) > 0 { + line := mod + mod = nil + if i := bytes.IndexByte(line, '\n'); i >= 0 { + line, mod = line[:i], line[i+1:] + } + if i := bytes.Index(line, slashSlash); i >= 0 { + line = line[:i] + } + line = bytes.TrimSpace(line) + if !bytes.HasPrefix(line, moduleStr) { + continue + } + line = line[len(moduleStr):] + n := len(line) + line = bytes.TrimSpace(line) + if len(line) == n || len(line) == 0 { + continue + } + + if line[0] == '"' || line[0] == '`' { + p, err := strconv.Unquote(string(line)) + if err != nil { + return "" // malformed quoted string or multiline module path + } + return p + } + + return string(line) + } + return "" // missing module path +} diff --git a/vendor/github.com/rogpeppe/go-internal/modfile/rule.go b/vendor/github.com/rogpeppe/go-internal/modfile/rule.go new file mode 100644 index 0000000000000000000000000000000000000000..24d275f12f2548fb53624b917696e30ce755d080 --- /dev/null +++ b/vendor/github.com/rogpeppe/go-internal/modfile/rule.go @@ -0,0 +1,724 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package modfile + +import ( + "bytes" + "errors" + "fmt" + "path/filepath" + "regexp" + "sort" + "strconv" + "strings" + "unicode" + + "github.com/rogpeppe/go-internal/module" + "github.com/rogpeppe/go-internal/semver" +) + +// A File is the parsed, interpreted form of a go.mod file. +type File struct { + Module *Module + Go *Go + Require []*Require + Exclude []*Exclude + Replace []*Replace + + Syntax *FileSyntax +} + +// A Module is the module statement. +type Module struct { + Mod module.Version + Syntax *Line +} + +// A Go is the go statement. +type Go struct { + Version string // "1.23" + Syntax *Line +} + +// A Require is a single require statement. +type Require struct { + Mod module.Version + Indirect bool // has "// indirect" comment + Syntax *Line +} + +// An Exclude is a single exclude statement. +type Exclude struct { + Mod module.Version + Syntax *Line +} + +// A Replace is a single replace statement. 
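+// For example, `replace example.com/a v1.0.0 => ./local` parses to Old =
+// {Path: "example.com/a", Version: "v1.0.0"} and New = {Path: "./local"}
+// with New.Version left empty.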
+type Replace struct { + Old module.Version + New module.Version + Syntax *Line +} + +func (f *File) AddModuleStmt(path string) error { + if f.Syntax == nil { + f.Syntax = new(FileSyntax) + } + if f.Module == nil { + f.Module = &Module{ + Mod: module.Version{Path: path}, + Syntax: f.Syntax.addLine(nil, "module", AutoQuote(path)), + } + } else { + f.Module.Mod.Path = path + f.Syntax.updateLine(f.Module.Syntax, "module", AutoQuote(path)) + } + return nil +} + +func (f *File) AddComment(text string) { + if f.Syntax == nil { + f.Syntax = new(FileSyntax) + } + f.Syntax.Stmt = append(f.Syntax.Stmt, &CommentBlock{ + Comments: Comments{ + Before: []Comment{ + { + Token: text, + }, + }, + }, + }) +} + +type VersionFixer func(path, version string) (string, error) + +// Parse parses the data, reported in errors as being from file, +// into a File struct. It applies fix, if non-nil, to canonicalize all module versions found. +func Parse(file string, data []byte, fix VersionFixer) (*File, error) { + return parseToFile(file, data, fix, true) +} + +// ParseLax is like Parse but ignores unknown statements. +// It is used when parsing go.mod files other than the main module, +// under the theory that most statement types we add in the future will +// only apply in the main module, like exclude and replace, +// and so we get better gradual deployments if old go commands +// simply ignore those statements when found in go.mod files +// in dependencies. +func ParseLax(file string, data []byte, fix VersionFixer) (*File, error) { + return parseToFile(file, data, fix, false) +} + +func parseToFile(file string, data []byte, fix VersionFixer, strict bool) (*File, error) { + fs, err := parse(file, data) + if err != nil { + return nil, err + } + f := &File{ + Syntax: fs, + } + + var errs bytes.Buffer + for _, x := range fs.Stmt { + switch x := x.(type) { + case *Line: + f.add(&errs, x, x.Token[0], x.Token[1:], fix, strict) + + case *LineBlock: + if len(x.Token) > 1 { + if strict { + fmt.Fprintf(&errs, "%s:%d: unknown block type: %s\n", file, x.Start.Line, strings.Join(x.Token, " ")) + } + continue + } + switch x.Token[0] { + default: + if strict { + fmt.Fprintf(&errs, "%s:%d: unknown block type: %s\n", file, x.Start.Line, strings.Join(x.Token, " ")) + } + continue + case "module", "require", "exclude", "replace": + for _, l := range x.Line { + f.add(&errs, l, x.Token[0], l.Token, fix, strict) + } + } + } + } + + if errs.Len() > 0 { + return nil, errors.New(strings.TrimRight(errs.String(), "\n")) + } + return f, nil +} + +var goVersionRE = regexp.MustCompile(`([1-9][0-9]*)\.(0|[1-9][0-9]*)`) + +func (f *File) add(errs *bytes.Buffer, line *Line, verb string, args []string, fix VersionFixer, strict bool) { + // If strict is false, this module is a dependency. + // We ignore all unknown directives as well as main-module-only + // directives like replace and exclude. It will work better for + // forward compatibility if we can depend on modules that have unknown + // statements (presumed relevant only when acting as the main module) + // and simply ignore those statements. 
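+	// For example, under ParseLax a `replace` or `exclude` line in a
+	// dependency's go.mod is silently skipped, while `module`, `require`,
+	// and `go` lines are still parsed.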
+ if !strict { + switch verb { + case "module", "require", "go": + // want these even for dependency go.mods + default: + return + } + } + + switch verb { + default: + fmt.Fprintf(errs, "%s:%d: unknown directive: %s\n", f.Syntax.Name, line.Start.Line, verb) + + case "go": + if f.Go != nil { + fmt.Fprintf(errs, "%s:%d: repeated go statement\n", f.Syntax.Name, line.Start.Line) + return + } + if len(args) != 1 || !goVersionRE.MatchString(args[0]) { + fmt.Fprintf(errs, "%s:%d: usage: go 1.23\n", f.Syntax.Name, line.Start.Line) + return + } + f.Go = &Go{Syntax: line} + f.Go.Version = args[0] + case "module": + if f.Module != nil { + fmt.Fprintf(errs, "%s:%d: repeated module statement\n", f.Syntax.Name, line.Start.Line) + return + } + f.Module = &Module{Syntax: line} + if len(args) != 1 { + + fmt.Fprintf(errs, "%s:%d: usage: module module/path [version]\n", f.Syntax.Name, line.Start.Line) + return + } + s, err := parseString(&args[0]) + if err != nil { + fmt.Fprintf(errs, "%s:%d: invalid quoted string: %v\n", f.Syntax.Name, line.Start.Line, err) + return + } + f.Module.Mod = module.Version{Path: s} + case "require", "exclude": + if len(args) != 2 { + fmt.Fprintf(errs, "%s:%d: usage: %s module/path v1.2.3\n", f.Syntax.Name, line.Start.Line, verb) + return + } + s, err := parseString(&args[0]) + if err != nil { + fmt.Fprintf(errs, "%s:%d: invalid quoted string: %v\n", f.Syntax.Name, line.Start.Line, err) + return + } + old := args[1] + v, err := parseVersion(s, &args[1], fix) + if err != nil { + fmt.Fprintf(errs, "%s:%d: invalid module version %q: %v\n", f.Syntax.Name, line.Start.Line, old, err) + return + } + pathMajor, err := modulePathMajor(s) + if err != nil { + fmt.Fprintf(errs, "%s:%d: %v\n", f.Syntax.Name, line.Start.Line, err) + return + } + if !module.MatchPathMajor(v, pathMajor) { + if pathMajor == "" { + pathMajor = "v0 or v1" + } + fmt.Fprintf(errs, "%s:%d: invalid module: %s should be %s, not %s (%s)\n", f.Syntax.Name, line.Start.Line, s, pathMajor, semver.Major(v), v) + return + } + if verb == "require" { + f.Require = append(f.Require, &Require{ + Mod: module.Version{Path: s, Version: v}, + Syntax: line, + Indirect: isIndirect(line), + }) + } else { + f.Exclude = append(f.Exclude, &Exclude{ + Mod: module.Version{Path: s, Version: v}, + Syntax: line, + }) + } + case "replace": + arrow := 2 + if len(args) >= 2 && args[1] == "=>" { + arrow = 1 + } + if len(args) < arrow+2 || len(args) > arrow+3 || args[arrow] != "=>" { + fmt.Fprintf(errs, "%s:%d: usage: %s module/path [v1.2.3] => other/module v1.4\n\t or %s module/path [v1.2.3] => ../local/directory\n", f.Syntax.Name, line.Start.Line, verb, verb) + return + } + s, err := parseString(&args[0]) + if err != nil { + fmt.Fprintf(errs, "%s:%d: invalid quoted string: %v\n", f.Syntax.Name, line.Start.Line, err) + return + } + pathMajor, err := modulePathMajor(s) + if err != nil { + fmt.Fprintf(errs, "%s:%d: %v\n", f.Syntax.Name, line.Start.Line, err) + return + } + var v string + if arrow == 2 { + old := args[1] + v, err = parseVersion(s, &args[1], fix) + if err != nil { + fmt.Fprintf(errs, "%s:%d: invalid module version %v: %v\n", f.Syntax.Name, line.Start.Line, old, err) + return + } + if !module.MatchPathMajor(v, pathMajor) { + if pathMajor == "" { + pathMajor = "v0 or v1" + } + fmt.Fprintf(errs, "%s:%d: invalid module: %s should be %s, not %s (%s)\n", f.Syntax.Name, line.Start.Line, s, pathMajor, semver.Major(v), v) + return + } + } + ns, err := parseString(&args[arrow+1]) + if err != nil { + fmt.Fprintf(errs, "%s:%d: invalid quoted 
string: %v\n", f.Syntax.Name, line.Start.Line, err)
+			return
+		}
+		nv := ""
+		if len(args) == arrow+2 {
+			if !IsDirectoryPath(ns) {
+				fmt.Fprintf(errs, "%s:%d: replacement module without version must be directory path (rooted or starting with ./ or ../)\n", f.Syntax.Name, line.Start.Line)
+				return
+			}
+			if filepath.Separator == '/' && strings.Contains(ns, `\`) {
+				fmt.Fprintf(errs, "%s:%d: replacement directory appears to be Windows path (on a non-windows system)\n", f.Syntax.Name, line.Start.Line)
+				return
+			}
+		}
+		if len(args) == arrow+3 {
+			old := args[arrow+1]
+			nv, err = parseVersion(ns, &args[arrow+2], fix)
+			if err != nil {
+				fmt.Fprintf(errs, "%s:%d: invalid module version %v: %v\n", f.Syntax.Name, line.Start.Line, old, err)
+				return
+			}
+			if IsDirectoryPath(ns) {
+				fmt.Fprintf(errs, "%s:%d: replacement module directory path %q cannot have version\n", f.Syntax.Name, line.Start.Line, ns)
+				return
+			}
+		}
+		f.Replace = append(f.Replace, &Replace{
+			Old:    module.Version{Path: s, Version: v},
+			New:    module.Version{Path: ns, Version: nv},
+			Syntax: line,
+		})
+	}
+}
+
+// isIndirect reports whether line has a "// indirect" comment,
+// meaning it is in go.mod only for its effect on indirect dependencies,
+// so that it can be dropped entirely once the effective version of the
+// indirect dependency reaches the given minimum version.
+func isIndirect(line *Line) bool {
+	if len(line.Suffix) == 0 {
+		return false
+	}
+	f := strings.Fields(line.Suffix[0].Token)
+	return (len(f) == 2 && f[1] == "indirect" || len(f) > 2 && f[1] == "indirect;") && f[0] == "//"
+}
+
+// setIndirect sets line to have (or not have) a "// indirect" comment.
+func setIndirect(line *Line, indirect bool) {
+	if isIndirect(line) == indirect {
+		return
+	}
+	if indirect {
+		// Adding comment.
+		if len(line.Suffix) == 0 {
+			// New comment.
+			line.Suffix = []Comment{{Token: "// indirect", Suffix: true}}
+			return
+		}
+		// Insert at beginning of existing comment.
+		com := &line.Suffix[0]
+		space := " "
+		if len(com.Token) > 2 && (com.Token[2] == ' ' || com.Token[2] == '\t') {
+			space = ""
+		}
+		com.Token = "// indirect;" + space + com.Token[2:]
+		return
+	}
+
+	// Removing comment.
+	f := strings.Fields(line.Suffix[0].Token)
+	if len(f) == 2 {
+		// Remove whole comment.
+		line.Suffix = nil
+		return
+	}
+
+	// Remove comment prefix.
+	com := &line.Suffix[0]
+	i := strings.Index(com.Token, "indirect;")
+	com.Token = "//" + com.Token[i+len("indirect;"):]
+}
+
+// IsDirectoryPath reports whether the given path should be interpreted
+// as a directory path. Just like on the go command line, relative paths
+// and rooted paths are directory paths; the rest are module paths.
+func IsDirectoryPath(ns string) bool {
+	// Because go.mod files can move from one system to another,
+	// we check all known path syntaxes, both Unix and Windows.
+	return strings.HasPrefix(ns, "./") || strings.HasPrefix(ns, "../") || strings.HasPrefix(ns, "/") ||
+		strings.HasPrefix(ns, `.\`) || strings.HasPrefix(ns, `..\`) || strings.HasPrefix(ns, `\`) ||
+		len(ns) >= 2 && ('A' <= ns[0] && ns[0] <= 'Z' || 'a' <= ns[0] && ns[0] <= 'z') && ns[1] == ':'
+}
+
+// MustQuote reports whether s must be quoted in order to appear as
+// a single token in a go.mod line.
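+//
+// For example (illustrative): MustQuote("rsc.io/quote") is false, while
+// MustQuote("two words"), MustQuote(""), and MustQuote("a//b") are all true,
+// so AutoQuote would quote only the latter three.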
+func MustQuote(s string) bool { + for _, r := range s { + if !unicode.IsPrint(r) || r == ' ' || r == '"' || r == '\'' || r == '`' { + return true + } + } + return s == "" || strings.Contains(s, "//") || strings.Contains(s, "/*") +} + +// AutoQuote returns s or, if quoting is required for s to appear in a go.mod, +// the quotation of s. +func AutoQuote(s string) string { + if MustQuote(s) { + return strconv.Quote(s) + } + return s +} + +func parseString(s *string) (string, error) { + t := *s + if strings.HasPrefix(t, `"`) { + var err error + if t, err = strconv.Unquote(t); err != nil { + return "", err + } + } else if strings.ContainsAny(t, "\"'`") { + // Other quotes are reserved both for possible future expansion + // and to avoid confusion. For example if someone types 'x' + // we want that to be a syntax error and not a literal x in literal quotation marks. + return "", fmt.Errorf("unquoted string cannot contain quote") + } + *s = AutoQuote(t) + return t, nil +} + +func parseVersion(path string, s *string, fix VersionFixer) (string, error) { + t, err := parseString(s) + if err != nil { + return "", err + } + if fix != nil { + var err error + t, err = fix(path, t) + if err != nil { + return "", err + } + } + if v := module.CanonicalVersion(t); v != "" { + *s = v + return *s, nil + } + return "", fmt.Errorf("version must be of the form v1.2.3") +} + +func modulePathMajor(path string) (string, error) { + _, major, ok := module.SplitPathVersion(path) + if !ok { + return "", fmt.Errorf("invalid module path") + } + return major, nil +} + +func (f *File) Format() ([]byte, error) { + return Format(f.Syntax), nil +} + +// Cleanup cleans up the file f after any edit operations. +// To avoid quadratic behavior, modifications like DropRequire +// clear the entry but do not remove it from the slice. +// Cleanup cleans out all the cleared entries. 
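+//
+// A typical editing sequence is (an illustrative sketch; the module path is
+// hypothetical):
+//
+//	f.DropRequire("example.com/old") // clears the entry in f.Require
+//	f.Cleanup()                      // compacts the slices and the syntax tree
+//	data, _ := f.Format()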
+func (f *File) Cleanup() { + w := 0 + for _, r := range f.Require { + if r.Mod.Path != "" { + f.Require[w] = r + w++ + } + } + f.Require = f.Require[:w] + + w = 0 + for _, x := range f.Exclude { + if x.Mod.Path != "" { + f.Exclude[w] = x + w++ + } + } + f.Exclude = f.Exclude[:w] + + w = 0 + for _, r := range f.Replace { + if r.Old.Path != "" { + f.Replace[w] = r + w++ + } + } + f.Replace = f.Replace[:w] + + f.Syntax.Cleanup() +} + +func (f *File) AddRequire(path, vers string) error { + need := true + for _, r := range f.Require { + if r.Mod.Path == path { + if need { + r.Mod.Version = vers + f.Syntax.updateLine(r.Syntax, "require", AutoQuote(path), vers) + need = false + } else { + f.Syntax.removeLine(r.Syntax) + *r = Require{} + } + } + } + + if need { + f.AddNewRequire(path, vers, false) + } + return nil +} + +func (f *File) AddNewRequire(path, vers string, indirect bool) { + line := f.Syntax.addLine(nil, "require", AutoQuote(path), vers) + setIndirect(line, indirect) + f.Require = append(f.Require, &Require{module.Version{Path: path, Version: vers}, indirect, line}) +} + +func (f *File) SetRequire(req []*Require) { + need := make(map[string]string) + indirect := make(map[string]bool) + for _, r := range req { + need[r.Mod.Path] = r.Mod.Version + indirect[r.Mod.Path] = r.Indirect + } + + for _, r := range f.Require { + if v, ok := need[r.Mod.Path]; ok { + r.Mod.Version = v + r.Indirect = indirect[r.Mod.Path] + } + } + + var newStmts []Expr + for _, stmt := range f.Syntax.Stmt { + switch stmt := stmt.(type) { + case *LineBlock: + if len(stmt.Token) > 0 && stmt.Token[0] == "require" { + var newLines []*Line + for _, line := range stmt.Line { + if p, err := parseString(&line.Token[0]); err == nil && need[p] != "" { + line.Token[1] = need[p] + delete(need, p) + setIndirect(line, indirect[p]) + newLines = append(newLines, line) + } + } + if len(newLines) == 0 { + continue // drop stmt + } + stmt.Line = newLines + } + + case *Line: + if len(stmt.Token) > 0 && stmt.Token[0] == "require" { + if p, err := parseString(&stmt.Token[1]); err == nil && need[p] != "" { + stmt.Token[2] = need[p] + delete(need, p) + setIndirect(stmt, indirect[p]) + } else { + continue // drop stmt + } + } + } + newStmts = append(newStmts, stmt) + } + f.Syntax.Stmt = newStmts + + for path, vers := range need { + f.AddNewRequire(path, vers, indirect[path]) + } + f.SortBlocks() +} + +func (f *File) DropRequire(path string) error { + for _, r := range f.Require { + if r.Mod.Path == path { + f.Syntax.removeLine(r.Syntax) + *r = Require{} + } + } + return nil +} + +func (f *File) AddExclude(path, vers string) error { + var hint *Line + for _, x := range f.Exclude { + if x.Mod.Path == path && x.Mod.Version == vers { + return nil + } + if x.Mod.Path == path { + hint = x.Syntax + } + } + + f.Exclude = append(f.Exclude, &Exclude{Mod: module.Version{Path: path, Version: vers}, Syntax: f.Syntax.addLine(hint, "exclude", AutoQuote(path), vers)}) + return nil +} + +func (f *File) DropExclude(path, vers string) error { + for _, x := range f.Exclude { + if x.Mod.Path == path && x.Mod.Version == vers { + f.Syntax.removeLine(x.Syntax) + *x = Exclude{} + } + } + return nil +} + +func (f *File) AddReplace(oldPath, oldVers, newPath, newVers string) error { + need := true + old := module.Version{Path: oldPath, Version: oldVers} + new := module.Version{Path: newPath, Version: newVers} + tokens := []string{"replace", AutoQuote(oldPath)} + if oldVers != "" { + tokens = append(tokens, oldVers) + } + tokens = append(tokens, "=>", 
AutoQuote(newPath)) + if newVers != "" { + tokens = append(tokens, newVers) + } + + var hint *Line + for _, r := range f.Replace { + if r.Old.Path == oldPath && (oldVers == "" || r.Old.Version == oldVers) { + if need { + // Found replacement for old; update to use new. + r.New = new + f.Syntax.updateLine(r.Syntax, tokens...) + need = false + continue + } + // Already added; delete other replacements for same. + f.Syntax.removeLine(r.Syntax) + *r = Replace{} + } + if r.Old.Path == oldPath { + hint = r.Syntax + } + } + if need { + f.Replace = append(f.Replace, &Replace{Old: old, New: new, Syntax: f.Syntax.addLine(hint, tokens...)}) + } + return nil +} + +func (f *File) DropReplace(oldPath, oldVers string) error { + for _, r := range f.Replace { + if r.Old.Path == oldPath && r.Old.Version == oldVers { + f.Syntax.removeLine(r.Syntax) + *r = Replace{} + } + } + return nil +} + +func (f *File) SortBlocks() { + f.removeDups() // otherwise sorting is unsafe + + for _, stmt := range f.Syntax.Stmt { + block, ok := stmt.(*LineBlock) + if !ok { + continue + } + sort.Slice(block.Line, func(i, j int) bool { + li := block.Line[i] + lj := block.Line[j] + for k := 0; k < len(li.Token) && k < len(lj.Token); k++ { + if li.Token[k] != lj.Token[k] { + return li.Token[k] < lj.Token[k] + } + } + return len(li.Token) < len(lj.Token) + }) + } +} + +func (f *File) removeDups() { + have := make(map[module.Version]bool) + kill := make(map[*Line]bool) + for _, x := range f.Exclude { + if have[x.Mod] { + kill[x.Syntax] = true + continue + } + have[x.Mod] = true + } + var excl []*Exclude + for _, x := range f.Exclude { + if !kill[x.Syntax] { + excl = append(excl, x) + } + } + f.Exclude = excl + + have = make(map[module.Version]bool) + // Later replacements take priority over earlier ones. + for i := len(f.Replace) - 1; i >= 0; i-- { + x := f.Replace[i] + if have[x.Old] { + kill[x.Syntax] = true + continue + } + have[x.Old] = true + } + var repl []*Replace + for _, x := range f.Replace { + if !kill[x.Syntax] { + repl = append(repl, x) + } + } + f.Replace = repl + + var stmts []Expr + for _, stmt := range f.Syntax.Stmt { + switch stmt := stmt.(type) { + case *Line: + if kill[stmt] { + continue + } + case *LineBlock: + var lines []*Line + for _, line := range stmt.Line { + if !kill[line] { + lines = append(lines, line) + } + } + stmt.Line = lines + if len(lines) == 0 { + continue + } + } + stmts = append(stmts, stmt) + } + f.Syntax.Stmt = stmts +} diff --git a/vendor/github.com/rogpeppe/go-internal/module/module.go b/vendor/github.com/rogpeppe/go-internal/module/module.go new file mode 100644 index 0000000000000000000000000000000000000000..3ff6d9bf53563f96be5dbe7839a8372b471ca2c5 --- /dev/null +++ b/vendor/github.com/rogpeppe/go-internal/module/module.go @@ -0,0 +1,540 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package module defines the module.Version type +// along with support code. +package module + +// IMPORTANT NOTE +// +// This file essentially defines the set of valid import paths for the go command. +// There are many subtle considerations, including Unicode ambiguity, +// security, network, and file system representations. +// +// This file also defines the set of valid module path and version combinations, +// another topic with many subtle considerations. +// +// Changes to the semantics in this file require approval from rsc. 
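+//
+// For example (illustrative): Check("gopkg.in/yaml.v2", "v2.2.1") is nil,
+// while Check("gopkg.in/yaml.v2", "v1.0.0") reports a mismatch, because the
+// ".v2" suffix only corresponds to semantic versions beginning with "v2.".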
+ +import ( + "fmt" + "sort" + "strings" + "unicode" + "unicode/utf8" + + "github.com/rogpeppe/go-internal/semver" +) + +// A Version is defined by a module path and version pair. +type Version struct { + Path string + + // Version is usually a semantic version in canonical form. + // There are two exceptions to this general rule. + // First, the top-level target of a build has no specific version + // and uses Version = "". + // Second, during MVS calculations the version "none" is used + // to represent the decision to take no version of a given module. + Version string `json:",omitempty"` +} + +// Check checks that a given module path, version pair is valid. +// In addition to the path being a valid module path +// and the version being a valid semantic version, +// the two must correspond. +// For example, the path "yaml/v2" only corresponds to +// semantic versions beginning with "v2.". +func Check(path, version string) error { + if err := CheckPath(path); err != nil { + return err + } + if !semver.IsValid(version) { + return fmt.Errorf("malformed semantic version %v", version) + } + _, pathMajor, _ := SplitPathVersion(path) + if !MatchPathMajor(version, pathMajor) { + if pathMajor == "" { + pathMajor = "v0 or v1" + } + if pathMajor[0] == '.' { // .v1 + pathMajor = pathMajor[1:] + } + return fmt.Errorf("mismatched module path %v and version %v (want %v)", path, version, pathMajor) + } + return nil +} + +// firstPathOK reports whether r can appear in the first element of a module path. +// The first element of the path must be an LDH domain name, at least for now. +// To avoid case ambiguity, the domain name must be entirely lower case. +func firstPathOK(r rune) bool { + return r == '-' || r == '.' || + '0' <= r && r <= '9' || + 'a' <= r && r <= 'z' +} + +// pathOK reports whether r can appear in an import path element. +// Paths can be ASCII letters, ASCII digits, and limited ASCII punctuation: + - . _ and ~. +// This matches what "go get" has historically recognized in import paths. +// TODO(rsc): We would like to allow Unicode letters, but that requires additional +// care in the safe encoding (see note below). +func pathOK(r rune) bool { + if r < utf8.RuneSelf { + return r == '+' || r == '-' || r == '.' || r == '_' || r == '~' || + '0' <= r && r <= '9' || + 'A' <= r && r <= 'Z' || + 'a' <= r && r <= 'z' + } + return false +} + +// fileNameOK reports whether r can appear in a file name. +// For now we allow all Unicode letters but otherwise limit to pathOK plus a few more punctuation characters. +// If we expand the set of allowed characters here, we have to +// work harder at detecting potential case-folding and normalization collisions. +// See note about "safe encoding" below. +func fileNameOK(r rune) bool { + if r < utf8.RuneSelf { + // Entire set of ASCII punctuation, from which we remove characters: + // ! " # $ % & ' ( ) * + , - . / : ; < = > ? @ [ \ ] ^ _ ` { | } ~ + // We disallow some shell special characters: " ' * < > ? ` | + // (Note that some of those are disallowed by the Windows file system as well.) + // We also disallow path separators / : and \ (fileNameOK is only called on path element characters). + // We allow spaces (U+0020) in file names. + const allowed = "!#$%&()+,-.=@[]^_{}~ " + if '0' <= r && r <= '9' || 'A' <= r && r <= 'Z' || 'a' <= r && r <= 'z' { + return true + } + for i := 0; i < len(allowed); i++ { + if rune(allowed[i]) == r { + return true + } + } + return false + } + // It may be OK to add more ASCII punctuation here, but only carefully. 
+ // For example Windows disallows < > \, and macOS disallows :, so we must not allow those. + return unicode.IsLetter(r) +} + +// CheckPath checks that a module path is valid. +func CheckPath(path string) error { + if err := checkPath(path, false); err != nil { + return fmt.Errorf("malformed module path %q: %v", path, err) + } + i := strings.Index(path, "/") + if i < 0 { + i = len(path) + } + if i == 0 { + return fmt.Errorf("malformed module path %q: leading slash", path) + } + if !strings.Contains(path[:i], ".") { + return fmt.Errorf("malformed module path %q: missing dot in first path element", path) + } + if path[0] == '-' { + return fmt.Errorf("malformed module path %q: leading dash in first path element", path) + } + for _, r := range path[:i] { + if !firstPathOK(r) { + return fmt.Errorf("malformed module path %q: invalid char %q in first path element", path, r) + } + } + if _, _, ok := SplitPathVersion(path); !ok { + return fmt.Errorf("malformed module path %q: invalid version", path) + } + return nil +} + +// CheckImportPath checks that an import path is valid. +func CheckImportPath(path string) error { + if err := checkPath(path, false); err != nil { + return fmt.Errorf("malformed import path %q: %v", path, err) + } + return nil +} + +// checkPath checks that a general path is valid. +// It returns an error describing why but not mentioning path. +// Because these checks apply to both module paths and import paths, +// the caller is expected to add the "malformed ___ path %q: " prefix. +// fileName indicates whether the final element of the path is a file name +// (as opposed to a directory name). +func checkPath(path string, fileName bool) error { + if !utf8.ValidString(path) { + return fmt.Errorf("invalid UTF-8") + } + if path == "" { + return fmt.Errorf("empty string") + } + if strings.Contains(path, "..") { + return fmt.Errorf("double dot") + } + if strings.Contains(path, "//") { + return fmt.Errorf("double slash") + } + if path[len(path)-1] == '/' { + return fmt.Errorf("trailing slash") + } + elemStart := 0 + for i, r := range path { + if r == '/' { + if err := checkElem(path[elemStart:i], fileName); err != nil { + return err + } + elemStart = i + 1 + } + } + if err := checkElem(path[elemStart:], fileName); err != nil { + return err + } + return nil +} + +// checkElem checks whether an individual path element is valid. +// fileName indicates whether the element is a file name (not a directory name). +func checkElem(elem string, fileName bool) error { + if elem == "" { + return fmt.Errorf("empty path element") + } + if strings.Count(elem, ".") == len(elem) { + return fmt.Errorf("invalid path element %q", elem) + } + if elem[0] == '.' && !fileName { + return fmt.Errorf("leading dot in path element") + } + if elem[len(elem)-1] == '.' { + return fmt.Errorf("trailing dot in path element") + } + charOK := pathOK + if fileName { + charOK = fileNameOK + } + for _, r := range elem { + if !charOK(r) { + return fmt.Errorf("invalid char %q", r) + } + } + + // Windows disallows a bunch of path elements, sadly. + // See https://docs.microsoft.com/en-us/windows/desktop/fileio/naming-a-file + short := elem + if i := strings.Index(short, "."); i >= 0 { + short = short[:i] + } + for _, bad := range badWindowsNames { + if strings.EqualFold(bad, short) { + return fmt.Errorf("disallowed path element %q", elem) + } + } + return nil +} + +// CheckFilePath checks whether a slash-separated file path is valid. 
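+//
+// For example (illustrative): CheckFilePath("x/y.go") is nil, while
+// CheckFilePath("x//y.go") and CheckFilePath("x/../y.go") report the
+// double-slash and double-dot errors from checkPath.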
+func CheckFilePath(path string) error { + if err := checkPath(path, true); err != nil { + return fmt.Errorf("malformed file path %q: %v", path, err) + } + return nil +} + +// badWindowsNames are the reserved file path elements on Windows. +// See https://docs.microsoft.com/en-us/windows/desktop/fileio/naming-a-file +var badWindowsNames = []string{ + "CON", + "PRN", + "AUX", + "NUL", + "COM1", + "COM2", + "COM3", + "COM4", + "COM5", + "COM6", + "COM7", + "COM8", + "COM9", + "LPT1", + "LPT2", + "LPT3", + "LPT4", + "LPT5", + "LPT6", + "LPT7", + "LPT8", + "LPT9", +} + +// SplitPathVersion returns prefix and major version such that prefix+pathMajor == path +// and version is either empty or "/vN" for N >= 2. +// As a special case, gopkg.in paths are recognized directly; +// they require ".vN" instead of "/vN", and for all N, not just N >= 2. +func SplitPathVersion(path string) (prefix, pathMajor string, ok bool) { + if strings.HasPrefix(path, "gopkg.in/") { + return splitGopkgIn(path) + } + + i := len(path) + dot := false + for i > 0 && ('0' <= path[i-1] && path[i-1] <= '9' || path[i-1] == '.') { + if path[i-1] == '.' { + dot = true + } + i-- + } + if i <= 1 || path[i-1] != 'v' || path[i-2] != '/' { + return path, "", true + } + prefix, pathMajor = path[:i-2], path[i-2:] + if dot || len(pathMajor) <= 2 || pathMajor[2] == '0' || pathMajor == "/v1" { + return path, "", false + } + return prefix, pathMajor, true +} + +// splitGopkgIn is like SplitPathVersion but only for gopkg.in paths. +func splitGopkgIn(path string) (prefix, pathMajor string, ok bool) { + if !strings.HasPrefix(path, "gopkg.in/") { + return path, "", false + } + i := len(path) + if strings.HasSuffix(path, "-unstable") { + i -= len("-unstable") + } + for i > 0 && ('0' <= path[i-1] && path[i-1] <= '9') { + i-- + } + if i <= 1 || path[i-1] != 'v' || path[i-2] != '.' { + // All gopkg.in paths must end in vN for some N. + return path, "", false + } + prefix, pathMajor = path[:i-2], path[i-2:] + if len(pathMajor) <= 2 || pathMajor[2] == '0' && pathMajor != ".v0" { + return path, "", false + } + return prefix, pathMajor, true +} + +// MatchPathMajor reports whether the semantic version v +// matches the path major version pathMajor. +func MatchPathMajor(v, pathMajor string) bool { + if strings.HasPrefix(pathMajor, ".v") && strings.HasSuffix(pathMajor, "-unstable") { + pathMajor = strings.TrimSuffix(pathMajor, "-unstable") + } + if strings.HasPrefix(v, "v0.0.0-") && pathMajor == ".v1" { + // Allow old bug in pseudo-versions that generated v0.0.0- pseudoversion for gopkg .v1. + // For example, gopkg.in/yaml.v2@v2.2.1's go.mod requires gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405. + return true + } + m := semver.Major(v) + if pathMajor == "" { + return m == "v0" || m == "v1" || semver.Build(v) == "+incompatible" + } + return (pathMajor[0] == '/' || pathMajor[0] == '.') && m == pathMajor[1:] +} + +// CanonicalVersion returns the canonical form of the version string v. +// It is the same as semver.Canonical(v) except that it preserves the special build suffix "+incompatible". +func CanonicalVersion(v string) string { + cv := semver.Canonical(v) + if semver.Build(v) == "+incompatible" { + cv += "+incompatible" + } + return cv +} + +// Sort sorts the list by Path, breaking ties by comparing Versions. +func Sort(list []Version) { + sort.Slice(list, func(i, j int) bool { + mi := list[i] + mj := list[j] + if mi.Path != mj.Path { + return mi.Path < mj.Path + } + // To help go.sum formatting, allow version/file. 
+ // Compare semver prefix by semver rules, + // file by string order. + vi := mi.Version + vj := mj.Version + var fi, fj string + if k := strings.Index(vi, "/"); k >= 0 { + vi, fi = vi[:k], vi[k:] + } + if k := strings.Index(vj, "/"); k >= 0 { + vj, fj = vj[:k], vj[k:] + } + if vi != vj { + return semver.Compare(vi, vj) < 0 + } + return fi < fj + }) +} + +// Safe encodings +// +// Module paths appear as substrings of file system paths +// (in the download cache) and of web server URLs in the proxy protocol. +// In general we cannot rely on file systems to be case-sensitive, +// nor can we rely on web servers, since they read from file systems. +// That is, we cannot rely on the file system to keep rsc.io/QUOTE +// and rsc.io/quote separate. Windows and macOS don't. +// Instead, we must never require two different casings of a file path. +// Because we want the download cache to match the proxy protocol, +// and because we want the proxy protocol to be possible to serve +// from a tree of static files (which might be stored on a case-insensitive +// file system), the proxy protocol must never require two different casings +// of a URL path either. +// +// One possibility would be to make the safe encoding be the lowercase +// hexadecimal encoding of the actual path bytes. This would avoid ever +// needing different casings of a file path, but it would be fairly illegible +// to most programmers when those paths appeared in the file system +// (including in file paths in compiler errors and stack traces) +// in web server logs, and so on. Instead, we want a safe encoding that +// leaves most paths unaltered. +// +// The safe encoding is this: +// replace every uppercase letter with an exclamation mark +// followed by the letter's lowercase equivalent. +// +// For example, +// github.com/Azure/azure-sdk-for-go -> github.com/!azure/azure-sdk-for-go. +// github.com/GoogleCloudPlatform/cloudsql-proxy -> github.com/!google!cloud!platform/cloudsql-proxy +// github.com/Sirupsen/logrus -> github.com/!sirupsen/logrus. +// +// Import paths that avoid upper-case letters are left unchanged. +// Note that because import paths are ASCII-only and avoid various +// problematic punctuation (like : < and >), the safe encoding is also ASCII-only +// and avoids the same problematic punctuation. +// +// Import paths have never allowed exclamation marks, so there is no +// need to define how to encode a literal !. +// +// Although paths are disallowed from using Unicode (see pathOK above), +// the eventual plan is to allow Unicode letters as well, to assume that +// file systems and URLs are Unicode-safe (storing UTF-8), and apply +// the !-for-uppercase convention. Note however that not all runes that +// are different but case-fold equivalent are an upper/lower pair. +// For example, U+004B ('K'), U+006B ('k'), and U+212A ('K' for Kelvin) +// are considered to case-fold to each other. When we do add Unicode +// letters, we must not assume that upper/lower are the only case-equivalent pairs. +// Perhaps the Kelvin symbol would be disallowed entirely, for example. +// Or perhaps it would encode as "!!k", or perhaps as "(212A)". +// +// Also, it would be nice to allow Unicode marks as well as letters, +// but marks include combining marks, and then we must deal not +// only with case folding but also normalization: both U+00E9 ('é') +// and U+0065 U+0301 ('e' followed by combining acute accent) +// look the same on the page and are treated by some file systems +// as the same path. 
If we do allow Unicode marks in paths, there +// must be some kind of normalization to allow only one canonical +// encoding of any character used in an import path. + +// EncodePath returns the safe encoding of the given module path. +// It fails if the module path is invalid. +func EncodePath(path string) (encoding string, err error) { + if err := CheckPath(path); err != nil { + return "", err + } + + return encodeString(path) +} + +// EncodeVersion returns the safe encoding of the given module version. +// Versions are allowed to be in non-semver form but must be valid file names +// and not contain exclamation marks. +func EncodeVersion(v string) (encoding string, err error) { + if err := checkElem(v, true); err != nil || strings.Contains(v, "!") { + return "", fmt.Errorf("disallowed version string %q", v) + } + return encodeString(v) +} + +func encodeString(s string) (encoding string, err error) { + haveUpper := false + for _, r := range s { + if r == '!' || r >= utf8.RuneSelf { + // This should be disallowed by CheckPath, but diagnose anyway. + // The correctness of the encoding loop below depends on it. + return "", fmt.Errorf("internal error: inconsistency in EncodePath") + } + if 'A' <= r && r <= 'Z' { + haveUpper = true + } + } + + if !haveUpper { + return s, nil + } + + var buf []byte + for _, r := range s { + if 'A' <= r && r <= 'Z' { + buf = append(buf, '!', byte(r+'a'-'A')) + } else { + buf = append(buf, byte(r)) + } + } + return string(buf), nil +} + +// DecodePath returns the module path of the given safe encoding. +// It fails if the encoding is invalid or encodes an invalid path. +func DecodePath(encoding string) (path string, err error) { + path, ok := decodeString(encoding) + if !ok { + return "", fmt.Errorf("invalid module path encoding %q", encoding) + } + if err := CheckPath(path); err != nil { + return "", fmt.Errorf("invalid module path encoding %q: %v", encoding, err) + } + return path, nil +} + +// DecodeVersion returns the version string for the given safe encoding. +// It fails if the encoding is invalid or encodes an invalid version. +// Versions are allowed to be in non-semver form but must be valid file names +// and not contain exclamation marks. +func DecodeVersion(encoding string) (v string, err error) { + v, ok := decodeString(encoding) + if !ok { + return "", fmt.Errorf("invalid version encoding %q", encoding) + } + if err := checkElem(v, true); err != nil { + return "", fmt.Errorf("disallowed version string %q", v) + } + return v, nil +} + +func decodeString(encoding string) (string, bool) { + var buf []byte + + bang := false + for _, r := range encoding { + if r >= utf8.RuneSelf { + return "", false + } + if bang { + bang = false + if r < 'a' || 'z' < r { + return "", false + } + buf = append(buf, byte(r+'A'-'a')) + continue + } + if r == '!' { + bang = true + continue + } + if 'A' <= r && r <= 'Z' { + return "", false + } + buf = append(buf, byte(r)) + } + if bang { + return "", false + } + return string(buf), true +} diff --git a/vendor/github.com/rogpeppe/go-internal/semver/semver.go b/vendor/github.com/rogpeppe/go-internal/semver/semver.go new file mode 100644 index 0000000000000000000000000000000000000000..4af7118e55d2ef7977266d9561027ca0f9935b02 --- /dev/null +++ b/vendor/github.com/rogpeppe/go-internal/semver/semver.go @@ -0,0 +1,388 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+
+// Package semver implements comparison of semantic version strings.
+// In this package, semantic version strings must begin with a leading "v",
+// as in "v1.0.0".
+//
+// The general form of a semantic version string accepted by this package is
+//
+//	vMAJOR[.MINOR[.PATCH[-PRERELEASE][+BUILD]]]
+//
+// where square brackets indicate optional parts of the syntax;
+// MAJOR, MINOR, and PATCH are decimal integers without extra leading zeros;
+// PRERELEASE and BUILD are each a series of non-empty dot-separated identifiers
+// using only alphanumeric characters and hyphens; and
+// all-numeric PRERELEASE identifiers must not have leading zeros.
+//
+// This package follows Semantic Versioning 2.0.0 (see semver.org)
+// with two exceptions. First, it requires the "v" prefix. Second, it recognizes
+// vMAJOR and vMAJOR.MINOR (with no prerelease or build suffixes)
+// as shorthands for vMAJOR.0.0 and vMAJOR.MINOR.0.
+package semver
+
+// parsed is the parsed form of a semantic version string.
+type parsed struct {
+	major      string
+	minor      string
+	patch      string
+	short      string
+	prerelease string
+	build      string
+	err        string
+}
+
+// IsValid reports whether v is a valid semantic version string.
+func IsValid(v string) bool {
+	_, ok := parse(v)
+	return ok
+}
+
+// Canonical returns the canonical formatting of the semantic version v.
+// It fills in any missing .MINOR or .PATCH and discards build metadata.
+// Two semantic versions compare equal only if their canonical formattings
+// are identical strings.
+// The canonical invalid semantic version is the empty string.
+func Canonical(v string) string {
+	p, ok := parse(v)
+	if !ok {
+		return ""
+	}
+	if p.build != "" {
+		return v[:len(v)-len(p.build)]
+	}
+	if p.short != "" {
+		return v + p.short
+	}
+	return v
+}
+
+// Major returns the major version prefix of the semantic version v.
+// For example, Major("v2.1.0") == "v2".
+// If v is an invalid semantic version string, Major returns the empty string.
+func Major(v string) string {
+	pv, ok := parse(v)
+	if !ok {
+		return ""
+	}
+	return v[:1+len(pv.major)]
+}
+
+// MajorMinor returns the major.minor version prefix of the semantic version v.
+// For example, MajorMinor("v2.1.0") == "v2.1".
+// If v is an invalid semantic version string, MajorMinor returns the empty string.
+func MajorMinor(v string) string {
+	pv, ok := parse(v)
+	if !ok {
+		return ""
+	}
+	i := 1 + len(pv.major)
+	if j := i + 1 + len(pv.minor); j <= len(v) && v[i] == '.' && v[i+1:j] == pv.minor {
+		return v[:j]
+	}
+	return v[:i] + "." + pv.minor
+}
+
+// Prerelease returns the prerelease suffix of the semantic version v.
+// For example, Prerelease("v2.1.0-pre+meta") == "-pre".
+// If v is an invalid semantic version string, Prerelease returns the empty string.
+func Prerelease(v string) string {
+	pv, ok := parse(v)
+	if !ok {
+		return ""
+	}
+	return pv.prerelease
+}
+
+// Build returns the build suffix of the semantic version v.
+// For example, Build("v2.1.0+meta") == "+meta".
+// If v is an invalid semantic version string, Build returns the empty string.
+func Build(v string) string {
+	pv, ok := parse(v)
+	if !ok {
+		return ""
+	}
+	return pv.build
+}
+
+// Compare returns an integer comparing two versions according to
+// semantic version precedence.
+// The result will be 0 if v == w, -1 if v < w, or +1 if v > w.
+//
+// An invalid semantic version string is considered less than a valid one.
+// All invalid semantic version strings compare equal to each other.
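+//
+// For example (illustrative): Compare("v1.2.3", "v1.2") > 0 because v1.2 is
+// shorthand for v1.2.0, Compare("v1.0.0-alpha", "v1.0.0") < 0 because a
+// prerelease sorts below the release, and Compare("1.2.3", "v1.2.3") < 0
+// because "1.2.3" lacks the required "v" prefix and is therefore invalid.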
+func Compare(v, w string) int { + pv, ok1 := parse(v) + pw, ok2 := parse(w) + if !ok1 && !ok2 { + return 0 + } + if !ok1 { + return -1 + } + if !ok2 { + return +1 + } + if c := compareInt(pv.major, pw.major); c != 0 { + return c + } + if c := compareInt(pv.minor, pw.minor); c != 0 { + return c + } + if c := compareInt(pv.patch, pw.patch); c != 0 { + return c + } + return comparePrerelease(pv.prerelease, pw.prerelease) +} + +// Max canonicalizes its arguments and then returns the version string +// that compares greater. +func Max(v, w string) string { + v = Canonical(v) + w = Canonical(w) + if Compare(v, w) > 0 { + return v + } + return w +} + +func parse(v string) (p parsed, ok bool) { + if v == "" || v[0] != 'v' { + p.err = "missing v prefix" + return + } + p.major, v, ok = parseInt(v[1:]) + if !ok { + p.err = "bad major version" + return + } + if v == "" { + p.minor = "0" + p.patch = "0" + p.short = ".0.0" + return + } + if v[0] != '.' { + p.err = "bad minor prefix" + ok = false + return + } + p.minor, v, ok = parseInt(v[1:]) + if !ok { + p.err = "bad minor version" + return + } + if v == "" { + p.patch = "0" + p.short = ".0" + return + } + if v[0] != '.' { + p.err = "bad patch prefix" + ok = false + return + } + p.patch, v, ok = parseInt(v[1:]) + if !ok { + p.err = "bad patch version" + return + } + if len(v) > 0 && v[0] == '-' { + p.prerelease, v, ok = parsePrerelease(v) + if !ok { + p.err = "bad prerelease" + return + } + } + if len(v) > 0 && v[0] == '+' { + p.build, v, ok = parseBuild(v) + if !ok { + p.err = "bad build" + return + } + } + if v != "" { + p.err = "junk on end" + ok = false + return + } + ok = true + return +} + +func parseInt(v string) (t, rest string, ok bool) { + if v == "" { + return + } + if v[0] < '0' || '9' < v[0] { + return + } + i := 1 + for i < len(v) && '0' <= v[i] && v[i] <= '9' { + i++ + } + if v[0] == '0' && i != 1 { + return + } + return v[:i], v[i:], true +} + +func parsePrerelease(v string) (t, rest string, ok bool) { + // "A pre-release version MAY be denoted by appending a hyphen and + // a series of dot separated identifiers immediately following the patch version. + // Identifiers MUST comprise only ASCII alphanumerics and hyphen [0-9A-Za-z-]. + // Identifiers MUST NOT be empty. Numeric identifiers MUST NOT include leading zeroes." + if v == "" || v[0] != '-' { + return + } + i := 1 + start := 1 + for i < len(v) && v[i] != '+' { + if !isIdentChar(v[i]) && v[i] != '.' { + return + } + if v[i] == '.' { + if start == i || isBadNum(v[start:i]) { + return + } + start = i + 1 + } + i++ + } + if start == i || isBadNum(v[start:i]) { + return + } + return v[:i], v[i:], true +} + +func parseBuild(v string) (t, rest string, ok bool) { + if v == "" || v[0] != '+' { + return + } + i := 1 + start := 1 + for i < len(v) { + if !isIdentChar(v[i]) { + return + } + if v[i] == '.' 
{ + if start == i { + return + } + start = i + 1 + } + i++ + } + if start == i { + return + } + return v[:i], v[i:], true +} + +func isIdentChar(c byte) bool { + return 'A' <= c && c <= 'Z' || 'a' <= c && c <= 'z' || '0' <= c && c <= '9' || c == '-' +} + +func isBadNum(v string) bool { + i := 0 + for i < len(v) && '0' <= v[i] && v[i] <= '9' { + i++ + } + return i == len(v) && i > 1 && v[0] == '0' +} + +func isNum(v string) bool { + i := 0 + for i < len(v) && '0' <= v[i] && v[i] <= '9' { + i++ + } + return i == len(v) +} + +func compareInt(x, y string) int { + if x == y { + return 0 + } + if len(x) < len(y) { + return -1 + } + if len(x) > len(y) { + return +1 + } + if x < y { + return -1 + } else { + return +1 + } +} + +func comparePrerelease(x, y string) int { + // "When major, minor, and patch are equal, a pre-release version has + // lower precedence than a normal version. + // Example: 1.0.0-alpha < 1.0.0. + // Precedence for two pre-release versions with the same major, minor, + // and patch version MUST be determined by comparing each dot separated + // identifier from left to right until a difference is found as follows: + // identifiers consisting of only digits are compared numerically and + // identifiers with letters or hyphens are compared lexically in ASCII + // sort order. Numeric identifiers always have lower precedence than + // non-numeric identifiers. A larger set of pre-release fields has a + // higher precedence than a smaller set, if all of the preceding + // identifiers are equal. + // Example: 1.0.0-alpha < 1.0.0-alpha.1 < 1.0.0-alpha.beta < + // 1.0.0-beta < 1.0.0-beta.2 < 1.0.0-beta.11 < 1.0.0-rc.1 < 1.0.0." + if x == y { + return 0 + } + if x == "" { + return +1 + } + if y == "" { + return -1 + } + for x != "" && y != "" { + x = x[1:] // skip - or . + y = y[1:] // skip - or . + var dx, dy string + dx, x = nextIdent(x) + dy, y = nextIdent(y) + if dx != dy { + ix := isNum(dx) + iy := isNum(dy) + if ix != iy { + if ix { + return -1 + } else { + return +1 + } + } + if ix { + if len(dx) < len(dy) { + return -1 + } + if len(dx) > len(dy) { + return +1 + } + } + if dx < dy { + return -1 + } else { + return +1 + } + } + } + if x == "" { + return -1 + } else { + return +1 + } +} + +func nextIdent(x string) (dx, rest string) { + i := 0 + for i < len(x) && x[i] != '.' { + i++ + } + return x[:i], x[i:] +} diff --git a/vendor/github.com/spf13/afero/LICENSE.txt b/vendor/github.com/spf13/afero/LICENSE.txt new file mode 100644 index 0000000000000000000000000000000000000000..298f0e2665e512a7d5053faf2ce4793c281efe6a --- /dev/null +++ b/vendor/github.com/spf13/afero/LICENSE.txt @@ -0,0 +1,174 @@ + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. 
+ + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. 
If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. 
Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. diff --git a/vendor/github.com/spf13/afero/afero.go b/vendor/github.com/spf13/afero/afero.go new file mode 100644 index 0000000000000000000000000000000000000000..f5b5e127cd6a793196506d007454eb8b244c247e --- /dev/null +++ b/vendor/github.com/spf13/afero/afero.go @@ -0,0 +1,108 @@ +// Copyright © 2014 Steve Francia <spf@spf13.com>. +// Copyright 2013 tsuru authors. All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Package afero provides types and methods for interacting with the filesystem, +// as an abstraction layer. + +// Afero also provides a few implementations that are mostly interoperable. One that +// uses the operating system filesystem, one that uses memory to store files +// (cross platform) and an interface that should be implemented if you want to +// provide your own filesystem. + +package afero + +import ( + "errors" + "io" + "os" + "time" +) + +type Afero struct { + Fs +} + +// File represents a file in the filesystem. 
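+//
+// Code written against File and Fs runs unchanged over any implementation,
+// for example (an illustrative sketch, assuming the in-memory Fs provided
+// elsewhere in this package):
+//
+//	var fs Fs = NewMemMapFs()
+//	f, _ := fs.Create("hello.txt")
+//	f.WriteString("hello")
+//	f.Close()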
+type File interface { + io.Closer + io.Reader + io.ReaderAt + io.Seeker + io.Writer + io.WriterAt + + Name() string + Readdir(count int) ([]os.FileInfo, error) + Readdirnames(n int) ([]string, error) + Stat() (os.FileInfo, error) + Sync() error + Truncate(size int64) error + WriteString(s string) (ret int, err error) +} + +// Fs is the filesystem interface. +// +// Any simulated or real filesystem should implement this interface. +type Fs interface { + // Create creates a file in the filesystem, returning the file and an + // error, if any happens. + Create(name string) (File, error) + + // Mkdir creates a directory in the filesystem, return an error if any + // happens. + Mkdir(name string, perm os.FileMode) error + + // MkdirAll creates a directory path and all parents that does not exist + // yet. + MkdirAll(path string, perm os.FileMode) error + + // Open opens a file, returning it or an error, if any happens. + Open(name string) (File, error) + + // OpenFile opens a file using the given flags and the given mode. + OpenFile(name string, flag int, perm os.FileMode) (File, error) + + // Remove removes a file identified by name, returning an error, if any + // happens. + Remove(name string) error + + // RemoveAll removes a directory path and any children it contains. It + // does not fail if the path does not exist (return nil). + RemoveAll(path string) error + + // Rename renames a file. + Rename(oldname, newname string) error + + // Stat returns a FileInfo describing the named file, or an error, if any + // happens. + Stat(name string) (os.FileInfo, error) + + // The name of this FileSystem + Name() string + + //Chmod changes the mode of the named file to mode. + Chmod(name string, mode os.FileMode) error + + //Chtimes changes the access and modification times of the named file + Chtimes(name string, atime time.Time, mtime time.Time) error +} + +var ( + ErrFileClosed = errors.New("File is closed") + ErrOutOfRange = errors.New("Out of range") + ErrTooLarge = errors.New("Too large") + ErrFileNotFound = os.ErrNotExist + ErrFileExists = os.ErrExist + ErrDestinationExists = os.ErrExist +) diff --git a/vendor/github.com/spf13/afero/basepath.go b/vendor/github.com/spf13/afero/basepath.go new file mode 100644 index 0000000000000000000000000000000000000000..616ff8ff74c5326169f257519571a45f99629c59 --- /dev/null +++ b/vendor/github.com/spf13/afero/basepath.go @@ -0,0 +1,180 @@ +package afero + +import ( + "os" + "path/filepath" + "runtime" + "strings" + "time" +) + +var _ Lstater = (*BasePathFs)(nil) + +// The BasePathFs restricts all operations to a given path within an Fs. +// The given file name to the operations on this Fs will be prepended with +// the base path before calling the base Fs. +// Any file name (after filepath.Clean()) outside this base path will be +// treated as non existing file. +// +// Note that it does not clean the error messages on return, so you may +// reveal the real path on errors. 
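+//
+// For example (an illustrative sketch, assuming the OS-backed Fs provided
+// elsewhere in this package):
+//
+//	bp := NewBasePathFs(NewOsFs(), "/base/path")
+//	bp.Open("/foo/bar")   // reads /base/path/foo/bar
+//	bp.Open("../outside") // rejected with os.ErrNotExist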
+type BasePathFs struct { + source Fs + path string +} + +type BasePathFile struct { + File + path string +} + +func (f *BasePathFile) Name() string { + sourcename := f.File.Name() + return strings.TrimPrefix(sourcename, filepath.Clean(f.path)) +} + +func NewBasePathFs(source Fs, path string) Fs { + return &BasePathFs{source: source, path: path} +} + +// on a file outside the base path it returns the given file name and an error, +// else the given file with the base path prepended +func (b *BasePathFs) RealPath(name string) (path string, err error) { + if err := validateBasePathName(name); err != nil { + return name, err + } + + bpath := filepath.Clean(b.path) + path = filepath.Clean(filepath.Join(bpath, name)) + if !strings.HasPrefix(path, bpath) { + return name, os.ErrNotExist + } + + return path, nil +} + +func validateBasePathName(name string) error { + if runtime.GOOS != "windows" { + // Not much to do here; + // the virtual file paths all look absolute on *nix. + return nil + } + + // On Windows a common mistake would be to provide an absolute OS path + // We could strip out the base part, but that would not be very portable. + if filepath.IsAbs(name) { + return os.ErrNotExist + } + + return nil +} + +func (b *BasePathFs) Chtimes(name string, atime, mtime time.Time) (err error) { + if name, err = b.RealPath(name); err != nil { + return &os.PathError{Op: "chtimes", Path: name, Err: err} + } + return b.source.Chtimes(name, atime, mtime) +} + +func (b *BasePathFs) Chmod(name string, mode os.FileMode) (err error) { + if name, err = b.RealPath(name); err != nil { + return &os.PathError{Op: "chmod", Path: name, Err: err} + } + return b.source.Chmod(name, mode) +} + +func (b *BasePathFs) Name() string { + return "BasePathFs" +} + +func (b *BasePathFs) Stat(name string) (fi os.FileInfo, err error) { + if name, err = b.RealPath(name); err != nil { + return nil, &os.PathError{Op: "stat", Path: name, Err: err} + } + return b.source.Stat(name) +} + +func (b *BasePathFs) Rename(oldname, newname string) (err error) { + if oldname, err = b.RealPath(oldname); err != nil { + return &os.PathError{Op: "rename", Path: oldname, Err: err} + } + if newname, err = b.RealPath(newname); err != nil { + return &os.PathError{Op: "rename", Path: newname, Err: err} + } + return b.source.Rename(oldname, newname) +} + +func (b *BasePathFs) RemoveAll(name string) (err error) { + if name, err = b.RealPath(name); err != nil { + return &os.PathError{Op: "remove_all", Path: name, Err: err} + } + return b.source.RemoveAll(name) +} + +func (b *BasePathFs) Remove(name string) (err error) { + if name, err = b.RealPath(name); err != nil { + return &os.PathError{Op: "remove", Path: name, Err: err} + } + return b.source.Remove(name) +} + +func (b *BasePathFs) OpenFile(name string, flag int, mode os.FileMode) (f File, err error) { + if name, err = b.RealPath(name); err != nil { + return nil, &os.PathError{Op: "openfile", Path: name, Err: err} + } + sourcef, err := b.source.OpenFile(name, flag, mode) + if err != nil { + return nil, err + } + return &BasePathFile{sourcef, b.path}, nil +} + +func (b *BasePathFs) Open(name string) (f File, err error) { + if name, err = b.RealPath(name); err != nil { + return nil, &os.PathError{Op: "open", Path: name, Err: err} + } + sourcef, err := b.source.Open(name) + if err != nil { + return nil, err + } + return &BasePathFile{File: sourcef, path: b.path}, nil +} + +func (b *BasePathFs) Mkdir(name string, mode os.FileMode) (err error) { + if name, err = b.RealPath(name); err != nil { + return 
&os.PathError{Op: "mkdir", Path: name, Err: err} + } + return b.source.Mkdir(name, mode) +} + +func (b *BasePathFs) MkdirAll(name string, mode os.FileMode) (err error) { + if name, err = b.RealPath(name); err != nil { + return &os.PathError{Op: "mkdir", Path: name, Err: err} + } + return b.source.MkdirAll(name, mode) +} + +func (b *BasePathFs) Create(name string) (f File, err error) { + if name, err = b.RealPath(name); err != nil { + return nil, &os.PathError{Op: "create", Path: name, Err: err} + } + sourcef, err := b.source.Create(name) + if err != nil { + return nil, err + } + return &BasePathFile{File: sourcef, path: b.path}, nil +} + +func (b *BasePathFs) LstatIfPossible(name string) (os.FileInfo, bool, error) { + name, err := b.RealPath(name) + if err != nil { + return nil, false, &os.PathError{Op: "lstat", Path: name, Err: err} + } + if lstater, ok := b.source.(Lstater); ok { + return lstater.LstatIfPossible(name) + } + fi, err := b.source.Stat(name) + return fi, false, err +} + +// vim: ts=4 sw=4 noexpandtab nolist syn=go diff --git a/vendor/github.com/spf13/afero/cacheOnReadFs.go b/vendor/github.com/spf13/afero/cacheOnReadFs.go new file mode 100644 index 0000000000000000000000000000000000000000..29a26c67dd5d2636fc140d829ec4f2648e2b7aa8 --- /dev/null +++ b/vendor/github.com/spf13/afero/cacheOnReadFs.go @@ -0,0 +1,290 @@ +package afero + +import ( + "os" + "syscall" + "time" +) + +// If the cache duration is 0, cache time will be unlimited, i.e. once +// a file is in the layer, the base will never be read again for this file. +// +// For cache times greater than 0, the modification time of a file is +// checked. Note that a lot of file system implementations only allow a +// resolution of a second for timestamps... or as the godoc for os.Chtimes() +// states: "The underlying filesystem may truncate or round the values to a +// less precise time unit." +// +// This caching union will forward all write calls also to the base file +// system first. To prevent writing to the base Fs, wrap it in a read-only +// filter - Note: this will also make the overlay read-only, for writing files +// in the overlay, use the overlay Fs directly, not via the union Fs. 
+type CacheOnReadFs struct { + base Fs + layer Fs + cacheTime time.Duration +} + +func NewCacheOnReadFs(base Fs, layer Fs, cacheTime time.Duration) Fs { + return &CacheOnReadFs{base: base, layer: layer, cacheTime: cacheTime} +} + +type cacheState int + +const ( + // not present in the overlay, unknown if it exists in the base: + cacheMiss cacheState = iota + // present in the overlay and in base, base file is newer: + cacheStale + // present in the overlay - with cache time == 0 it may exist in the base, + // with cacheTime > 0 it exists in the base and is same age or newer in the + // overlay + cacheHit + // happens if someone writes directly to the overlay without + // going through this union + cacheLocal +) + +func (u *CacheOnReadFs) cacheStatus(name string) (state cacheState, fi os.FileInfo, err error) { + var lfi, bfi os.FileInfo + lfi, err = u.layer.Stat(name) + if err == nil { + if u.cacheTime == 0 { + return cacheHit, lfi, nil + } + if lfi.ModTime().Add(u.cacheTime).Before(time.Now()) { + bfi, err = u.base.Stat(name) + if err != nil { + return cacheLocal, lfi, nil + } + if bfi.ModTime().After(lfi.ModTime()) { + return cacheStale, bfi, nil + } + } + return cacheHit, lfi, nil + } + + if err == syscall.ENOENT || os.IsNotExist(err) { + return cacheMiss, nil, nil + } + + return cacheMiss, nil, err +} + +func (u *CacheOnReadFs) copyToLayer(name string) error { + return copyToLayer(u.base, u.layer, name) +} + +func (u *CacheOnReadFs) Chtimes(name string, atime, mtime time.Time) error { + st, _, err := u.cacheStatus(name) + if err != nil { + return err + } + switch st { + case cacheLocal: + case cacheHit: + err = u.base.Chtimes(name, atime, mtime) + case cacheStale, cacheMiss: + if err := u.copyToLayer(name); err != nil { + return err + } + err = u.base.Chtimes(name, atime, mtime) + } + if err != nil { + return err + } + return u.layer.Chtimes(name, atime, mtime) +} + +func (u *CacheOnReadFs) Chmod(name string, mode os.FileMode) error { + st, _, err := u.cacheStatus(name) + if err != nil { + return err + } + switch st { + case cacheLocal: + case cacheHit: + err = u.base.Chmod(name, mode) + case cacheStale, cacheMiss: + if err := u.copyToLayer(name); err != nil { + return err + } + err = u.base.Chmod(name, mode) + } + if err != nil { + return err + } + return u.layer.Chmod(name, mode) +} + +func (u *CacheOnReadFs) Stat(name string) (os.FileInfo, error) { + st, fi, err := u.cacheStatus(name) + if err != nil { + return nil, err + } + switch st { + case cacheMiss: + return u.base.Stat(name) + default: // cacheStale has base, cacheHit and cacheLocal the layer os.FileInfo + return fi, nil + } +} + +func (u *CacheOnReadFs) Rename(oldname, newname string) error { + st, _, err := u.cacheStatus(oldname) + if err != nil { + return err + } + switch st { + case cacheLocal: + case cacheHit: + err = u.base.Rename(oldname, newname) + case cacheStale, cacheMiss: + if err := u.copyToLayer(oldname); err != nil { + return err + } + err = u.base.Rename(oldname, newname) + } + if err != nil { + return err + } + return u.layer.Rename(oldname, newname) +} + +func (u *CacheOnReadFs) Remove(name string) error { + st, _, err := u.cacheStatus(name) + if err != nil { + return err + } + switch st { + case cacheLocal: + case cacheHit, cacheStale, cacheMiss: + err = u.base.Remove(name) + } + if err != nil { + return err + } + return u.layer.Remove(name) +} + +func (u *CacheOnReadFs) RemoveAll(name string) error { + st, _, err := u.cacheStatus(name) + if err != nil { + return err + } + switch st { + case cacheLocal: + 
case cacheHit, cacheStale, cacheMiss: + err = u.base.RemoveAll(name) + } + if err != nil { + return err + } + return u.layer.RemoveAll(name) +} + +func (u *CacheOnReadFs) OpenFile(name string, flag int, perm os.FileMode) (File, error) { + st, _, err := u.cacheStatus(name) + if err != nil { + return nil, err + } + switch st { + case cacheLocal, cacheHit: + default: + if err := u.copyToLayer(name); err != nil { + return nil, err + } + } + if flag&(os.O_WRONLY|syscall.O_RDWR|os.O_APPEND|os.O_CREATE|os.O_TRUNC) != 0 { + bfi, err := u.base.OpenFile(name, flag, perm) + if err != nil { + return nil, err + } + lfi, err := u.layer.OpenFile(name, flag, perm) + if err != nil { + bfi.Close() // oops, what if O_TRUNC was set and file opening in the layer failed...? + return nil, err + } + return &UnionFile{Base: bfi, Layer: lfi}, nil + } + return u.layer.OpenFile(name, flag, perm) +} + +func (u *CacheOnReadFs) Open(name string) (File, error) { + st, fi, err := u.cacheStatus(name) + if err != nil { + return nil, err + } + + switch st { + case cacheLocal: + return u.layer.Open(name) + + case cacheMiss: + bfi, err := u.base.Stat(name) + if err != nil { + return nil, err + } + if bfi.IsDir() { + return u.base.Open(name) + } + if err := u.copyToLayer(name); err != nil { + return nil, err + } + return u.layer.Open(name) + + case cacheStale: + if !fi.IsDir() { + if err := u.copyToLayer(name); err != nil { + return nil, err + } + return u.layer.Open(name) + } + case cacheHit: + if !fi.IsDir() { + return u.layer.Open(name) + } + } + // the dirs from cacheHit, cacheStale fall down here: + bfile, _ := u.base.Open(name) + lfile, err := u.layer.Open(name) + if err != nil && bfile == nil { + return nil, err + } + return &UnionFile{Base: bfile, Layer: lfile}, nil +} + +func (u *CacheOnReadFs) Mkdir(name string, perm os.FileMode) error { + err := u.base.Mkdir(name, perm) + if err != nil { + return err + } + return u.layer.MkdirAll(name, perm) // yes, MkdirAll... we cannot assume it exists in the cache +} + +func (u *CacheOnReadFs) Name() string { + return "CacheOnReadFs" +} + +func (u *CacheOnReadFs) MkdirAll(name string, perm os.FileMode) error { + err := u.base.MkdirAll(name, perm) + if err != nil { + return err + } + return u.layer.MkdirAll(name, perm) +} + +func (u *CacheOnReadFs) Create(name string) (File, error) { + bfh, err := u.base.Create(name) + if err != nil { + return nil, err + } + lfh, err := u.layer.Create(name) + if err != nil { + // oops, see comment about OS_TRUNC above, should we remove? then we have to + // remember if the file did not exist before + bfh.Close() + return nil, err + } + return &UnionFile{Base: bfh, Layer: lfh}, nil +} diff --git a/vendor/github.com/spf13/afero/const_bsds.go b/vendor/github.com/spf13/afero/const_bsds.go new file mode 100644 index 0000000000000000000000000000000000000000..5728243d962ddc68f58aaba3c21d016f3444c8a4 --- /dev/null +++ b/vendor/github.com/spf13/afero/const_bsds.go @@ -0,0 +1,22 @@ +// Copyright © 2016 Steve Francia <spf@spf13.com>. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+// See the License for the specific language governing permissions and +// limitations under the License. + +// +build darwin openbsd freebsd netbsd dragonfly + +package afero + +import ( + "syscall" +) + +const BADFD = syscall.EBADF diff --git a/vendor/github.com/spf13/afero/const_win_unix.go b/vendor/github.com/spf13/afero/const_win_unix.go new file mode 100644 index 0000000000000000000000000000000000000000..968fc2783e5e89de05143d7c213b964f2665d97d --- /dev/null +++ b/vendor/github.com/spf13/afero/const_win_unix.go @@ -0,0 +1,25 @@ +// Copyright © 2016 Steve Francia <spf@spf13.com>. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// +build !darwin +// +build !openbsd +// +build !freebsd +// +build !dragonfly +// +build !netbsd + +package afero + +import ( + "syscall" +) + +const BADFD = syscall.EBADFD diff --git a/vendor/github.com/spf13/afero/copyOnWriteFs.go b/vendor/github.com/spf13/afero/copyOnWriteFs.go new file mode 100644 index 0000000000000000000000000000000000000000..e8108a851e15333ac224837b3bedf78b11a89bf7 --- /dev/null +++ b/vendor/github.com/spf13/afero/copyOnWriteFs.go @@ -0,0 +1,293 @@ +package afero + +import ( + "fmt" + "os" + "path/filepath" + "syscall" + "time" +) + +var _ Lstater = (*CopyOnWriteFs)(nil) + +// The CopyOnWriteFs is a union filesystem: a read only base file system with +// a possibly writeable layer on top. Changes to the file system will only +// be made in the overlay: Changing an existing file in the base layer which +// is not present in the overlay will copy the file to the overlay ("changing" +// includes also calls to e.g. Chtimes() and Chmod()). +// +// Reading directories is currently only supported via Open(), not OpenFile(). 
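A short sketch of the copy-on-write semantics just described, assuming only the constructors in this diff (paths and contents are illustrative):

```go
package main

import (
	"fmt"

	"github.com/spf13/afero"
)

func main() {
	// A read-only view of the base plus a writable scratch layer.
	base := afero.NewMemMapFs()
	afero.WriteFile(base, "/etc/app.conf", []byte("debug=false"), 0644)

	layer := afero.NewMemMapFs()
	fs := afero.NewCopyOnWriteFs(afero.NewReadOnlyFs(base), layer)

	// Modifying a base file copies it up into the layer first.
	afero.WriteFile(fs, "/etc/app.conf", []byte("debug=true"), 0644)

	orig, _ := afero.ReadFile(base, "/etc/app.conf")
	mod, _ := afero.ReadFile(fs, "/etc/app.conf")
	fmt.Println(string(orig)) // debug=false (base is untouched)
	fmt.Println(string(mod))  // debug=true  (overlay wins)
}
```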
+type CopyOnWriteFs struct { + base Fs + layer Fs +} + +func NewCopyOnWriteFs(base Fs, layer Fs) Fs { + return &CopyOnWriteFs{base: base, layer: layer} +} + +// Returns true if the file is not in the overlay +func (u *CopyOnWriteFs) isBaseFile(name string) (bool, error) { + if _, err := u.layer.Stat(name); err == nil { + return false, nil + } + _, err := u.base.Stat(name) + if err != nil { + if oerr, ok := err.(*os.PathError); ok { + if oerr.Err == os.ErrNotExist || oerr.Err == syscall.ENOENT || oerr.Err == syscall.ENOTDIR { + return false, nil + } + } + if err == syscall.ENOENT { + return false, nil + } + } + return true, err +} + +func (u *CopyOnWriteFs) copyToLayer(name string) error { + return copyToLayer(u.base, u.layer, name) +} + +func (u *CopyOnWriteFs) Chtimes(name string, atime, mtime time.Time) error { + b, err := u.isBaseFile(name) + if err != nil { + return err + } + if b { + if err := u.copyToLayer(name); err != nil { + return err + } + } + return u.layer.Chtimes(name, atime, mtime) +} + +func (u *CopyOnWriteFs) Chmod(name string, mode os.FileMode) error { + b, err := u.isBaseFile(name) + if err != nil { + return err + } + if b { + if err := u.copyToLayer(name); err != nil { + return err + } + } + return u.layer.Chmod(name, mode) +} + +func (u *CopyOnWriteFs) Stat(name string) (os.FileInfo, error) { + fi, err := u.layer.Stat(name) + if err != nil { + isNotExist := u.isNotExist(err) + if isNotExist { + return u.base.Stat(name) + } + return nil, err + } + return fi, nil +} + +func (u *CopyOnWriteFs) LstatIfPossible(name string) (os.FileInfo, bool, error) { + llayer, ok1 := u.layer.(Lstater) + lbase, ok2 := u.base.(Lstater) + + if ok1 { + fi, b, err := llayer.LstatIfPossible(name) + if err == nil { + return fi, b, nil + } + + if !u.isNotExist(err) { + return nil, b, err + } + } + + if ok2 { + fi, b, err := lbase.LstatIfPossible(name) + if err == nil { + return fi, b, nil + } + if !u.isNotExist(err) { + return nil, b, err + } + } + + fi, err := u.Stat(name) + + return fi, false, err +} + +func (u *CopyOnWriteFs) isNotExist(err error) bool { + if e, ok := err.(*os.PathError); ok { + err = e.Err + } + if err == os.ErrNotExist || err == syscall.ENOENT || err == syscall.ENOTDIR { + return true + } + return false +} + +// Renaming files present only in the base layer is not permitted +func (u *CopyOnWriteFs) Rename(oldname, newname string) error { + b, err := u.isBaseFile(oldname) + if err != nil { + return err + } + if b { + return syscall.EPERM + } + return u.layer.Rename(oldname, newname) +} + +// Removing files present only in the base layer is not permitted. If +// a file is present in the base layer and the overlay, only the overlay +// will be removed. 
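The removal rules above can be seen in a tiny sketch; the exact error value depends on the layer implementation, so the example only checks that removal is refused and the base copy survives:

```go
package main

import (
	"fmt"

	"github.com/spf13/afero"
)

func main() {
	base := afero.NewMemMapFs()
	afero.WriteFile(base, "/base-only.txt", []byte("x"), 0644)

	fs := afero.NewCopyOnWriteFs(base, afero.NewMemMapFs())

	// The file exists only in the base, so the union refuses to remove it.
	err := fs.Remove("/base-only.txt")
	fmt.Println(err != nil) // true

	exists, _ := afero.Exists(base, "/base-only.txt")
	fmt.Println(exists) // true: the base layer is untouched
}
```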
+func (u *CopyOnWriteFs) Remove(name string) error { + err := u.layer.Remove(name) + switch err { + case syscall.ENOENT: + _, err = u.base.Stat(name) + if err == nil { + return syscall.EPERM + } + return syscall.ENOENT + default: + return err + } +} + +func (u *CopyOnWriteFs) RemoveAll(name string) error { + err := u.layer.RemoveAll(name) + switch err { + case syscall.ENOENT: + _, err = u.base.Stat(name) + if err == nil { + return syscall.EPERM + } + return syscall.ENOENT + default: + return err + } +} + +func (u *CopyOnWriteFs) OpenFile(name string, flag int, perm os.FileMode) (File, error) { + b, err := u.isBaseFile(name) + if err != nil { + return nil, err + } + + if flag&(os.O_WRONLY|os.O_RDWR|os.O_APPEND|os.O_CREATE|os.O_TRUNC) != 0 { + if b { + if err = u.copyToLayer(name); err != nil { + return nil, err + } + return u.layer.OpenFile(name, flag, perm) + } + + dir := filepath.Dir(name) + isaDir, err := IsDir(u.base, dir) + if err != nil && !os.IsNotExist(err) { + return nil, err + } + if isaDir { + if err = u.layer.MkdirAll(dir, 0777); err != nil { + return nil, err + } + return u.layer.OpenFile(name, flag, perm) + } + + isaDir, err = IsDir(u.layer, dir) + if err != nil { + return nil, err + } + if isaDir { + return u.layer.OpenFile(name, flag, perm) + } + + return nil, &os.PathError{Op: "open", Path: name, Err: syscall.ENOTDIR} // ...or os.ErrNotExist? + } + if b { + return u.base.OpenFile(name, flag, perm) + } + return u.layer.OpenFile(name, flag, perm) +} + +// This function handles the 9 different possibilities caused +// by the union which are the intersection of the following... +// layer: doesn't exist, exists as a file, and exists as a directory +// base: doesn't exist, exists as a file, and exists as a directory +func (u *CopyOnWriteFs) Open(name string) (File, error) { + // Since the overlay overrides the base we check that first + b, err := u.isBaseFile(name) + if err != nil { + return nil, err + } + + // If overlay doesn't exist, return the base (base state irrelevant) + if b { + return u.base.Open(name) + } + + // If overlay is a file, return it (base state irrelevant) + dir, err := IsDir(u.layer, name) + if err != nil { + return nil, err + } + if !dir { + return u.layer.Open(name) + } + + // Overlay is a directory, base state now matters. + // Base state has 3 states to check but 2 outcomes: + // A. It's a file or non-readable in the base (return just the overlay) + // B. It's an accessible directory in the base (return a UnionFile) + + // If base is file or nonreadable, return overlay + dir, err = IsDir(u.base, name) + if !dir || err != nil { + return u.layer.Open(name) + } + + // Both base & layer are directories + // Return union file (if opens are without error) + bfile, bErr := u.base.Open(name) + lfile, lErr := u.layer.Open(name) + + // If either have errors at this point something is very wrong. 
Return nil and the errors + if bErr != nil || lErr != nil { + return nil, fmt.Errorf("BaseErr: %v\nOverlayErr: %v", bErr, lErr) + } + + return &UnionFile{Base: bfile, Layer: lfile}, nil +} + +func (u *CopyOnWriteFs) Mkdir(name string, perm os.FileMode) error { + dir, err := IsDir(u.base, name) + if err != nil { + return u.layer.MkdirAll(name, perm) + } + if dir { + return ErrFileExists + } + return u.layer.MkdirAll(name, perm) +} + +func (u *CopyOnWriteFs) Name() string { + return "CopyOnWriteFs" +} + +func (u *CopyOnWriteFs) MkdirAll(name string, perm os.FileMode) error { + dir, err := IsDir(u.base, name) + if err != nil { + return u.layer.MkdirAll(name, perm) + } + if dir { + // This is in line with how os.MkdirAll behaves. + return nil + } + return u.layer.MkdirAll(name, perm) +} + +func (u *CopyOnWriteFs) Create(name string) (File, error) { + return u.OpenFile(name, os.O_CREATE|os.O_TRUNC|os.O_RDWR, 0666) +} diff --git a/vendor/github.com/spf13/afero/httpFs.go b/vendor/github.com/spf13/afero/httpFs.go new file mode 100644 index 0000000000000000000000000000000000000000..c42193688ceb626d75d6a4f97b4cfa6d30ec2e00 --- /dev/null +++ b/vendor/github.com/spf13/afero/httpFs.go @@ -0,0 +1,110 @@ +// Copyright © 2014 Steve Francia <spf@spf13.com>. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package afero + +import ( + "errors" + "net/http" + "os" + "path" + "path/filepath" + "strings" + "time" +) + +type httpDir struct { + basePath string + fs HttpFs +} + +func (d httpDir) Open(name string) (http.File, error) { + if filepath.Separator != '/' && strings.IndexRune(name, filepath.Separator) >= 0 || + strings.Contains(name, "\x00") { + return nil, errors.New("http: invalid character in file path") + } + dir := string(d.basePath) + if dir == "" { + dir = "." 
+ } + + f, err := d.fs.Open(filepath.Join(dir, filepath.FromSlash(path.Clean("/"+name)))) + if err != nil { + return nil, err + } + return f, nil +} + +type HttpFs struct { + source Fs +} + +func NewHttpFs(source Fs) *HttpFs { + return &HttpFs{source: source} +} + +func (h HttpFs) Dir(s string) *httpDir { + return &httpDir{basePath: s, fs: h} +} + +func (h HttpFs) Name() string { return "h HttpFs" } + +func (h HttpFs) Create(name string) (File, error) { + return h.source.Create(name) +} + +func (h HttpFs) Chmod(name string, mode os.FileMode) error { + return h.source.Chmod(name, mode) +} + +func (h HttpFs) Chtimes(name string, atime time.Time, mtime time.Time) error { + return h.source.Chtimes(name, atime, mtime) +} + +func (h HttpFs) Mkdir(name string, perm os.FileMode) error { + return h.source.Mkdir(name, perm) +} + +func (h HttpFs) MkdirAll(path string, perm os.FileMode) error { + return h.source.MkdirAll(path, perm) +} + +func (h HttpFs) Open(name string) (http.File, error) { + f, err := h.source.Open(name) + if err == nil { + if httpfile, ok := f.(http.File); ok { + return httpfile, nil + } + } + return nil, err +} + +func (h HttpFs) OpenFile(name string, flag int, perm os.FileMode) (File, error) { + return h.source.OpenFile(name, flag, perm) +} + +func (h HttpFs) Remove(name string) error { + return h.source.Remove(name) +} + +func (h HttpFs) RemoveAll(path string) error { + return h.source.RemoveAll(path) +} + +func (h HttpFs) Rename(oldname, newname string) error { + return h.source.Rename(oldname, newname) +} + +func (h HttpFs) Stat(name string) (os.FileInfo, error) { + return h.source.Stat(name) +} diff --git a/vendor/github.com/spf13/afero/ioutil.go b/vendor/github.com/spf13/afero/ioutil.go new file mode 100644 index 0000000000000000000000000000000000000000..5c3a3d8fffc98b973a7a89ee5728de12a4e91ed9 --- /dev/null +++ b/vendor/github.com/spf13/afero/ioutil.go @@ -0,0 +1,230 @@ +// Copyright ©2015 The Go Authors +// Copyright ©2015 Steve Francia <spf@spf13.com> +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package afero + +import ( + "bytes" + "io" + "os" + "path/filepath" + "sort" + "strconv" + "sync" + "time" +) + +// byName implements sort.Interface. +type byName []os.FileInfo + +func (f byName) Len() int { return len(f) } +func (f byName) Less(i, j int) bool { return f[i].Name() < f[j].Name() } +func (f byName) Swap(i, j int) { f[i], f[j] = f[j], f[i] } + +// ReadDir reads the directory named by dirname and returns +// a list of sorted directory entries. +func (a Afero) ReadDir(dirname string) ([]os.FileInfo, error) { + return ReadDir(a.Fs, dirname) +} + +func ReadDir(fs Fs, dirname string) ([]os.FileInfo, error) { + f, err := fs.Open(dirname) + if err != nil { + return nil, err + } + list, err := f.Readdir(-1) + f.Close() + if err != nil { + return nil, err + } + sort.Sort(byName(list)) + return list, nil +} + +// ReadFile reads the file named by filename and returns the contents. +// A successful call returns err == nil, not err == EOF. 
Because ReadFile +// reads the whole file, it does not treat an EOF from Read as an error +// to be reported. +func (a Afero) ReadFile(filename string) ([]byte, error) { + return ReadFile(a.Fs, filename) +} + +func ReadFile(fs Fs, filename string) ([]byte, error) { + f, err := fs.Open(filename) + if err != nil { + return nil, err + } + defer f.Close() + // It's a good but not certain bet that FileInfo will tell us exactly how much to + // read, so let's try it but be prepared for the answer to be wrong. + var n int64 + + if fi, err := f.Stat(); err == nil { + // Don't preallocate a huge buffer, just in case. + if size := fi.Size(); size < 1e9 { + n = size + } + } + // As initial capacity for readAll, use n + a little extra in case Size is zero, + // and to avoid another allocation after Read has filled the buffer. The readAll + // call will read into its allocated internal buffer cheaply. If the size was + // wrong, we'll either waste some space off the end or reallocate as needed, but + // in the overwhelmingly common case we'll get it just right. + return readAll(f, n+bytes.MinRead) +} + +// readAll reads from r until an error or EOF and returns the data it read +// from the internal buffer allocated with a specified capacity. +func readAll(r io.Reader, capacity int64) (b []byte, err error) { + buf := bytes.NewBuffer(make([]byte, 0, capacity)) + // If the buffer overflows, we will get bytes.ErrTooLarge. + // Return that as an error. Any other panic remains. + defer func() { + e := recover() + if e == nil { + return + } + if panicErr, ok := e.(error); ok && panicErr == bytes.ErrTooLarge { + err = panicErr + } else { + panic(e) + } + }() + _, err = buf.ReadFrom(r) + return buf.Bytes(), err +} + +// ReadAll reads from r until an error or EOF and returns the data it read. +// A successful call returns err == nil, not err == EOF. Because ReadAll is +// defined to read from src until EOF, it does not treat an EOF from Read +// as an error to be reported. +func ReadAll(r io.Reader) ([]byte, error) { + return readAll(r, bytes.MinRead) +} + +// WriteFile writes data to a file named by filename. +// If the file does not exist, WriteFile creates it with permissions perm; +// otherwise WriteFile truncates it before writing. +func (a Afero) WriteFile(filename string, data []byte, perm os.FileMode) error { + return WriteFile(a.Fs, filename, data, perm) +} + +func WriteFile(fs Fs, filename string, data []byte, perm os.FileMode) error { + f, err := fs.OpenFile(filename, os.O_WRONLY|os.O_CREATE|os.O_TRUNC, perm) + if err != nil { + return err + } + n, err := f.Write(data) + if err == nil && n < len(data) { + err = io.ErrShortWrite + } + if err1 := f.Close(); err == nil { + err = err1 + } + return err +} + +// Random number state. +// We generate random temporary file names so that there's a good +// chance the file doesn't exist yet - keeps the number of tries in +// TempFile to a minimum. +var rand uint32 +var randmu sync.Mutex + +func reseed() uint32 { + return uint32(time.Now().UnixNano() + int64(os.Getpid())) +} + +func nextSuffix() string { + randmu.Lock() + r := rand + if r == 0 { + r = reseed() + } + r = r*1664525 + 1013904223 // constants from Numerical Recipes + rand = r + randmu.Unlock() + return strconv.Itoa(int(1e9 + r%1e9))[1:] +} + +// TempFile creates a new temporary file in the directory dir +// with a name beginning with prefix, opens the file for reading +// and writing, and returns the resulting *File. 
+// If dir is the empty string, TempFile uses the default directory +// for temporary files (see os.TempDir). +// Multiple programs calling TempFile simultaneously +// will not choose the same file. The caller can use f.Name() +// to find the pathname of the file. It is the caller's responsibility +// to remove the file when no longer needed. +func (a Afero) TempFile(dir, prefix string) (f File, err error) { + return TempFile(a.Fs, dir, prefix) +} + +func TempFile(fs Fs, dir, prefix string) (f File, err error) { + if dir == "" { + dir = os.TempDir() + } + + nconflict := 0 + for i := 0; i < 10000; i++ { + name := filepath.Join(dir, prefix+nextSuffix()) + f, err = fs.OpenFile(name, os.O_RDWR|os.O_CREATE|os.O_EXCL, 0600) + if os.IsExist(err) { + if nconflict++; nconflict > 10 { + randmu.Lock() + rand = reseed() + randmu.Unlock() + } + continue + } + break + } + return +} + +// TempDir creates a new temporary directory in the directory dir +// with a name beginning with prefix and returns the path of the +// new directory. If dir is the empty string, TempDir uses the +// default directory for temporary files (see os.TempDir). +// Multiple programs calling TempDir simultaneously +// will not choose the same directory. It is the caller's responsibility +// to remove the directory when no longer needed. +func (a Afero) TempDir(dir, prefix string) (name string, err error) { + return TempDir(a.Fs, dir, prefix) +} +func TempDir(fs Fs, dir, prefix string) (name string, err error) { + if dir == "" { + dir = os.TempDir() + } + + nconflict := 0 + for i := 0; i < 10000; i++ { + try := filepath.Join(dir, prefix+nextSuffix()) + err = fs.Mkdir(try, 0700) + if os.IsExist(err) { + if nconflict++; nconflict > 10 { + randmu.Lock() + rand = reseed() + randmu.Unlock() + } + continue + } + if err == nil { + name = try + } + break + } + return +} diff --git a/vendor/github.com/spf13/afero/lstater.go b/vendor/github.com/spf13/afero/lstater.go new file mode 100644 index 0000000000000000000000000000000000000000..89c1bfc0a7d7a53a2b6211266ac431a3d76dcfdb --- /dev/null +++ b/vendor/github.com/spf13/afero/lstater.go @@ -0,0 +1,27 @@ +// Copyright © 2018 Steve Francia <spf@spf13.com>. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package afero + +import ( + "os" +) + +// Lstater is an optional interface in Afero. It is only implemented by the +// filesystems saying so. +// It will call Lstat if the filesystem itself is, or it delegates to, the os filesystem. +// Else it will call Stat. +// In addition to the FileInfo, it will return a boolean telling whether Lstat was called or not. +type Lstater interface { + LstatIfPossible(name string) (os.FileInfo, bool, error) +} diff --git a/vendor/github.com/spf13/afero/match.go b/vendor/github.com/spf13/afero/match.go new file mode 100644 index 0000000000000000000000000000000000000000..c18a87fb713dcde04da7c7fcb663b980236b9126 --- /dev/null +++ b/vendor/github.com/spf13/afero/match.go @@ -0,0 +1,110 @@ +// Copyright © 2014 Steve Francia <spf@spf13.com>.
+// Copyright 2009 The Go Authors. All rights reserved. + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package afero + +import ( + "path/filepath" + "sort" + "strings" +) + +// Glob returns the names of all files matching pattern or nil +// if there is no matching file. The syntax of patterns is the same +// as in Match. The pattern may describe hierarchical names such as +// /usr/*/bin/ed (assuming the Separator is '/'). +// +// Glob ignores file system errors such as I/O errors reading directories. +// The only possible returned error is ErrBadPattern, when pattern +// is malformed. +// +// This was adapted from (http://golang.org/pkg/path/filepath) and uses several +// built-ins from that package. +func Glob(fs Fs, pattern string) (matches []string, err error) { + if !hasMeta(pattern) { + // Lstat is not supported by all filesystems. + if _, err = lstatIfPossible(fs, pattern); err != nil { + return nil, nil + } + return []string{pattern}, nil + } + + dir, file := filepath.Split(pattern) + switch dir { + case "": + dir = "." + case string(filepath.Separator): + // nothing + default: + dir = dir[0 : len(dir)-1] // chop off trailing separator + } + + if !hasMeta(dir) { + return glob(fs, dir, file, nil) + } + + var m []string + m, err = Glob(fs, dir) + if err != nil { + return + } + for _, d := range m { + matches, err = glob(fs, d, file, matches) + if err != nil { + return + } + } + return +} + +// glob searches for files matching pattern in the directory dir +// and appends them to matches. If the directory cannot be +// opened, it returns the existing matches. New matches are +// added in lexicographical order. +func glob(fs Fs, dir, pattern string, matches []string) (m []string, e error) { + m = matches + fi, err := fs.Stat(dir) + if err != nil { + return + } + if !fi.IsDir() { + return + } + d, err := fs.Open(dir) + if err != nil { + return + } + defer d.Close() + + names, _ := d.Readdirnames(-1) + sort.Strings(names) + + for _, n := range names { + matched, err := filepath.Match(pattern, n) + if err != nil { + return m, err + } + if matched { + m = append(m, filepath.Join(dir, n)) + } + } + return +} + +// hasMeta reports whether path contains any of the magic characters +// recognized by Match. +func hasMeta(path string) bool { + // TODO(niemeyer): Should other magic characters be added here? + return strings.IndexAny(path, "*?[") >= 0 +} diff --git a/vendor/github.com/spf13/afero/mem/dir.go b/vendor/github.com/spf13/afero/mem/dir.go new file mode 100644 index 0000000000000000000000000000000000000000..e104013f45712024294836f2e8e90a333303cdec --- /dev/null +++ b/vendor/github.com/spf13/afero/mem/dir.go @@ -0,0 +1,37 @@ +// Copyright © 2014 Steve Francia <spf@spf13.com>. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package mem + +type Dir interface { + Len() int + Names() []string + Files() []*FileData + Add(*FileData) + Remove(*FileData) +} + +func RemoveFromMemDir(dir *FileData, f *FileData) { + dir.memDir.Remove(f) +} + +func AddToMemDir(dir *FileData, f *FileData) { + dir.memDir.Add(f) +} + +func InitializeDir(d *FileData) { + if d.memDir == nil { + d.dir = true + d.memDir = &DirMap{} + } +} diff --git a/vendor/github.com/spf13/afero/mem/dirmap.go b/vendor/github.com/spf13/afero/mem/dirmap.go new file mode 100644 index 0000000000000000000000000000000000000000..03a57ee5b52e8d28663c2eb4dddc44841b604df6 --- /dev/null +++ b/vendor/github.com/spf13/afero/mem/dirmap.go @@ -0,0 +1,43 @@ +// Copyright © 2015 Steve Francia <spf@spf13.com>. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package mem + +import "sort" + +type DirMap map[string]*FileData + +func (m DirMap) Len() int { return len(m) } +func (m DirMap) Add(f *FileData) { m[f.name] = f } +func (m DirMap) Remove(f *FileData) { delete(m, f.name) } +func (m DirMap) Files() (files []*FileData) { + for _, f := range m { + files = append(files, f) + } + sort.Sort(filesSorter(files)) + return files +} + +// implement sort.Interface for []*FileData +type filesSorter []*FileData + +func (s filesSorter) Len() int { return len(s) } +func (s filesSorter) Swap(i, j int) { s[i], s[j] = s[j], s[i] } +func (s filesSorter) Less(i, j int) bool { return s[i].name < s[j].name } + +func (m DirMap) Names() (names []string) { + for x := range m { + names = append(names, x) + } + return names +} diff --git a/vendor/github.com/spf13/afero/mem/file.go b/vendor/github.com/spf13/afero/mem/file.go new file mode 100644 index 0000000000000000000000000000000000000000..7af2fb56ff46c2126e704c0643cdc1f008f61bb7 --- /dev/null +++ b/vendor/github.com/spf13/afero/mem/file.go @@ -0,0 +1,317 @@ +// Copyright © 2015 Steve Francia <spf@spf13.com>. +// Copyright 2013 tsuru authors. All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package mem + +import ( + "bytes" + "errors" + "io" + "os" + "path/filepath" + "sync" + "sync/atomic" +) + +import "time" + +const FilePathSeparator = string(filepath.Separator) + +type File struct { + // atomic requires 64-bit alignment for struct field access + at int64 + readDirCount int64 + closed bool + readOnly bool + fileData *FileData +} + +func NewFileHandle(data *FileData) *File { + return &File{fileData: data} +} + +func NewReadOnlyFileHandle(data *FileData) *File { + return &File{fileData: data, readOnly: true} +} + +func (f File) Data() *FileData { + return f.fileData +} + +type FileData struct { + sync.Mutex + name string + data []byte + memDir Dir + dir bool + mode os.FileMode + modtime time.Time +} + +func (d *FileData) Name() string { + d.Lock() + defer d.Unlock() + return d.name +} + +func CreateFile(name string) *FileData { + return &FileData{name: name, mode: os.ModeTemporary, modtime: time.Now()} +} + +func CreateDir(name string) *FileData { + return &FileData{name: name, memDir: &DirMap{}, dir: true} +} + +func ChangeFileName(f *FileData, newname string) { + f.Lock() + f.name = newname + f.Unlock() +} + +func SetMode(f *FileData, mode os.FileMode) { + f.Lock() + f.mode = mode + f.Unlock() +} + +func SetModTime(f *FileData, mtime time.Time) { + f.Lock() + setModTime(f, mtime) + f.Unlock() +} + +func setModTime(f *FileData, mtime time.Time) { + f.modtime = mtime +} + +func GetFileInfo(f *FileData) *FileInfo { + return &FileInfo{f} +} + +func (f *File) Open() error { + atomic.StoreInt64(&f.at, 0) + atomic.StoreInt64(&f.readDirCount, 0) + f.fileData.Lock() + f.closed = false + f.fileData.Unlock() + return nil +} + +func (f *File) Close() error { + f.fileData.Lock() + f.closed = true + if !f.readOnly { + setModTime(f.fileData, time.Now()) + } + f.fileData.Unlock() + return nil +} + +func (f *File) Name() string { + return f.fileData.Name() +} + +func (f *File) Stat() (os.FileInfo, error) { + return &FileInfo{f.fileData}, nil +} + +func (f *File) Sync() error { + return nil +} + +func (f *File) Readdir(count int) (res []os.FileInfo, err error) { + if !f.fileData.dir { + return nil, &os.PathError{Op: "readdir", Path: f.fileData.name, Err: errors.New("not a dir")} + } + var outLength int64 + + f.fileData.Lock() + files := f.fileData.memDir.Files()[f.readDirCount:] + if count > 0 { + if len(files) < count { + outLength = int64(len(files)) + } else { + outLength = int64(count) + } + if len(files) == 0 { + err = io.EOF + } + } else { + outLength = int64(len(files)) + } + f.readDirCount += outLength + f.fileData.Unlock() + + res = make([]os.FileInfo, outLength) + for i := range res { + res[i] = &FileInfo{files[i]} + } + + return res, err +} + +func (f *File) Readdirnames(n int) (names []string, err error) { + fi, err := f.Readdir(n) + names = make([]string, len(fi)) + for i, f := range fi { + _, names[i] = filepath.Split(f.Name()) + } + return names, err +} + +func (f *File) Read(b []byte) (n int, err error) { + f.fileData.Lock() + defer f.fileData.Unlock() + if f.closed == true { + return 0, ErrFileClosed + } + if len(b) > 0 && int(f.at) == len(f.fileData.data) { + return 0, io.EOF + } + if int(f.at) > len(f.fileData.data) { + return 0, io.ErrUnexpectedEOF + } + if len(f.fileData.data)-int(f.at) >= len(b) { + n = len(b) + } else { + n = len(f.fileData.data) - int(f.at) + } + copy(b, f.fileData.data[f.at:f.at+int64(n)]) + atomic.AddInt64(&f.at, int64(n)) + return +} + +func (f *File) ReadAt(b []byte, off int64) (n int, err error) { + atomic.StoreInt64(&f.at, off) + return 
f.Read(b) +} + +func (f *File) Truncate(size int64) error { + if f.closed == true { + return ErrFileClosed + } + if f.readOnly { + return &os.PathError{Op: "truncate", Path: f.fileData.name, Err: errors.New("file handle is read only")} + } + if size < 0 { + return ErrOutOfRange + } + if size > int64(len(f.fileData.data)) { + diff := size - int64(len(f.fileData.data)) + f.fileData.data = append(f.fileData.data, bytes.Repeat([]byte{00}, int(diff))...) + } else { + f.fileData.data = f.fileData.data[0:size] + } + setModTime(f.fileData, time.Now()) + return nil +} + +func (f *File) Seek(offset int64, whence int) (int64, error) { + if f.closed == true { + return 0, ErrFileClosed + } + switch whence { + case 0: + atomic.StoreInt64(&f.at, offset) + case 1: + atomic.AddInt64(&f.at, int64(offset)) + case 2: + atomic.StoreInt64(&f.at, int64(len(f.fileData.data))+offset) + } + return f.at, nil +} + +func (f *File) Write(b []byte) (n int, err error) { + if f.readOnly { + return 0, &os.PathError{Op: "write", Path: f.fileData.name, Err: errors.New("file handle is read only")} + } + n = len(b) + cur := atomic.LoadInt64(&f.at) + f.fileData.Lock() + defer f.fileData.Unlock() + diff := cur - int64(len(f.fileData.data)) + var tail []byte + if n+int(cur) < len(f.fileData.data) { + tail = f.fileData.data[n+int(cur):] + } + if diff > 0 { + f.fileData.data = append(bytes.Repeat([]byte{00}, int(diff)), b...) + f.fileData.data = append(f.fileData.data, tail...) + } else { + f.fileData.data = append(f.fileData.data[:cur], b...) + f.fileData.data = append(f.fileData.data, tail...) + } + setModTime(f.fileData, time.Now()) + + atomic.StoreInt64(&f.at, int64(len(f.fileData.data))) + return +} + +func (f *File) WriteAt(b []byte, off int64) (n int, err error) { + atomic.StoreInt64(&f.at, off) + return f.Write(b) +} + +func (f *File) WriteString(s string) (ret int, err error) { + return f.Write([]byte(s)) +} + +func (f *File) Info() *FileInfo { + return &FileInfo{f.fileData} +} + +type FileInfo struct { + *FileData +} + +// Implements os.FileInfo +func (s *FileInfo) Name() string { + s.Lock() + _, name := filepath.Split(s.name) + s.Unlock() + return name +} +func (s *FileInfo) Mode() os.FileMode { + s.Lock() + defer s.Unlock() + return s.mode +} +func (s *FileInfo) ModTime() time.Time { + s.Lock() + defer s.Unlock() + return s.modtime +} +func (s *FileInfo) IsDir() bool { + s.Lock() + defer s.Unlock() + return s.dir +} +func (s *FileInfo) Sys() interface{} { return nil } +func (s *FileInfo) Size() int64 { + if s.IsDir() { + return int64(42) + } + s.Lock() + defer s.Unlock() + return int64(len(s.data)) +} + +var ( + ErrFileClosed = errors.New("File is closed") + ErrOutOfRange = errors.New("Out of range") + ErrTooLarge = errors.New("Too large") + ErrFileNotFound = os.ErrNotExist + ErrFileExists = os.ErrExist + ErrDestinationExists = os.ErrExist +) diff --git a/vendor/github.com/spf13/afero/memmap.go b/vendor/github.com/spf13/afero/memmap.go new file mode 100644 index 0000000000000000000000000000000000000000..09498e70fbaa6e350ba699a1895d602cd957a496 --- /dev/null +++ b/vendor/github.com/spf13/afero/memmap.go @@ -0,0 +1,365 @@ +// Copyright © 2014 Steve Francia <spf@spf13.com>. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package afero + +import ( + "fmt" + "log" + "os" + "path/filepath" + "strings" + "sync" + "time" + + "github.com/spf13/afero/mem" +) + +type MemMapFs struct { + mu sync.RWMutex + data map[string]*mem.FileData + init sync.Once +} + +func NewMemMapFs() Fs { + return &MemMapFs{} +} + +func (m *MemMapFs) getData() map[string]*mem.FileData { + m.init.Do(func() { + m.data = make(map[string]*mem.FileData) + // Root should always exist, right? + // TODO: what about windows? + m.data[FilePathSeparator] = mem.CreateDir(FilePathSeparator) + }) + return m.data +} + +func (*MemMapFs) Name() string { return "MemMapFS" } + +func (m *MemMapFs) Create(name string) (File, error) { + name = normalizePath(name) + m.mu.Lock() + file := mem.CreateFile(name) + m.getData()[name] = file + m.registerWithParent(file) + m.mu.Unlock() + return mem.NewFileHandle(file), nil +} + +func (m *MemMapFs) unRegisterWithParent(fileName string) error { + f, err := m.lockfreeOpen(fileName) + if err != nil { + return err + } + parent := m.findParent(f) + if parent == nil { + log.Panic("parent of ", f.Name(), " is nil") + } + + parent.Lock() + mem.RemoveFromMemDir(parent, f) + parent.Unlock() + return nil +} + +func (m *MemMapFs) findParent(f *mem.FileData) *mem.FileData { + pdir, _ := filepath.Split(f.Name()) + pdir = filepath.Clean(pdir) + pfile, err := m.lockfreeOpen(pdir) + if err != nil { + return nil + } + return pfile +} + +func (m *MemMapFs) registerWithParent(f *mem.FileData) { + if f == nil { + return + } + parent := m.findParent(f) + if parent == nil { + pdir := filepath.Dir(filepath.Clean(f.Name())) + err := m.lockfreeMkdir(pdir, 0777) + if err != nil { + //log.Println("Mkdir error:", err) + return + } + parent, err = m.lockfreeOpen(pdir) + if err != nil { + //log.Println("Open after Mkdir error:", err) + return + } + } + + parent.Lock() + mem.InitializeDir(parent) + mem.AddToMemDir(parent, f) + parent.Unlock() +} + +func (m *MemMapFs) lockfreeMkdir(name string, perm os.FileMode) error { + name = normalizePath(name) + x, ok := m.getData()[name] + if ok { + // Only return ErrFileExists if it's a file, not a directory. 
+ i := mem.FileInfo{FileData: x} + if !i.IsDir() { + return ErrFileExists + } + } else { + item := mem.CreateDir(name) + m.getData()[name] = item + m.registerWithParent(item) + } + return nil +} + +func (m *MemMapFs) Mkdir(name string, perm os.FileMode) error { + name = normalizePath(name) + + m.mu.RLock() + _, ok := m.getData()[name] + m.mu.RUnlock() + if ok { + return &os.PathError{Op: "mkdir", Path: name, Err: ErrFileExists} + } + + m.mu.Lock() + item := mem.CreateDir(name) + m.getData()[name] = item + m.registerWithParent(item) + m.mu.Unlock() + + m.Chmod(name, perm|os.ModeDir) + + return nil +} + +func (m *MemMapFs) MkdirAll(path string, perm os.FileMode) error { + err := m.Mkdir(path, perm) + if err != nil { + if err.(*os.PathError).Err == ErrFileExists { + return nil + } + return err + } + return nil +} + +// Handle some relative paths +func normalizePath(path string) string { + path = filepath.Clean(path) + + switch path { + case ".": + return FilePathSeparator + case "..": + return FilePathSeparator + default: + return path + } +} + +func (m *MemMapFs) Open(name string) (File, error) { + f, err := m.open(name) + if f != nil { + return mem.NewReadOnlyFileHandle(f), err + } + return nil, err +} + +func (m *MemMapFs) openWrite(name string) (File, error) { + f, err := m.open(name) + if f != nil { + return mem.NewFileHandle(f), err + } + return nil, err +} + +func (m *MemMapFs) open(name string) (*mem.FileData, error) { + name = normalizePath(name) + + m.mu.RLock() + f, ok := m.getData()[name] + m.mu.RUnlock() + if !ok { + return nil, &os.PathError{Op: "open", Path: name, Err: ErrFileNotFound} + } + return f, nil +} + +func (m *MemMapFs) lockfreeOpen(name string) (*mem.FileData, error) { + name = normalizePath(name) + f, ok := m.getData()[name] + if ok { + return f, nil + } else { + return nil, ErrFileNotFound + } +} + +func (m *MemMapFs) OpenFile(name string, flag int, perm os.FileMode) (File, error) { + chmod := false + file, err := m.openWrite(name) + if os.IsNotExist(err) && (flag&os.O_CREATE > 0) { + file, err = m.Create(name) + chmod = true + } + if err != nil { + return nil, err + } + if flag == os.O_RDONLY { + file = mem.NewReadOnlyFileHandle(file.(*mem.File).Data()) + } + if flag&os.O_APPEND > 0 { + _, err = file.Seek(0, os.SEEK_END) + if err != nil { + file.Close() + return nil, err + } + } + if flag&os.O_TRUNC > 0 && flag&(os.O_RDWR|os.O_WRONLY) > 0 { + err = file.Truncate(0) + if err != nil { + file.Close() + return nil, err + } + } + if chmod { + m.Chmod(name, perm) + } + return file, nil +} + +func (m *MemMapFs) Remove(name string) error { + name = normalizePath(name) + + m.mu.Lock() + defer m.mu.Unlock() + + if _, ok := m.getData()[name]; ok { + err := m.unRegisterWithParent(name) + if err != nil { + return &os.PathError{Op: "remove", Path: name, Err: err} + } + delete(m.getData(), name) + } else { + return &os.PathError{Op: "remove", Path: name, Err: os.ErrNotExist} + } + return nil +} + +func (m *MemMapFs) RemoveAll(path string) error { + path = normalizePath(path) + m.mu.Lock() + m.unRegisterWithParent(path) + m.mu.Unlock() + + m.mu.RLock() + defer m.mu.RUnlock() + + for p, _ := range m.getData() { + if strings.HasPrefix(p, path) { + m.mu.RUnlock() + m.mu.Lock() + delete(m.getData(), p) + m.mu.Unlock() + m.mu.RLock() + } + } + return nil +} + +func (m *MemMapFs) Rename(oldname, newname string) error { + oldname = normalizePath(oldname) + newname = normalizePath(newname) + + if oldname == newname { + return nil + } + + m.mu.RLock() + defer m.mu.RUnlock() + if _, ok := 
m.getData()[oldname]; ok { + m.mu.RUnlock() + m.mu.Lock() + m.unRegisterWithParent(oldname) + fileData := m.getData()[oldname] + delete(m.getData(), oldname) + mem.ChangeFileName(fileData, newname) + m.getData()[newname] = fileData + m.registerWithParent(fileData) + m.mu.Unlock() + m.mu.RLock() + } else { + return &os.PathError{Op: "rename", Path: oldname, Err: ErrFileNotFound} + } + return nil +} + +func (m *MemMapFs) Stat(name string) (os.FileInfo, error) { + f, err := m.Open(name) + if err != nil { + return nil, err + } + fi := mem.GetFileInfo(f.(*mem.File).Data()) + return fi, nil +} + +func (m *MemMapFs) Chmod(name string, mode os.FileMode) error { + name = normalizePath(name) + + m.mu.RLock() + f, ok := m.getData()[name] + m.mu.RUnlock() + if !ok { + return &os.PathError{Op: "chmod", Path: name, Err: ErrFileNotFound} + } + + m.mu.Lock() + mem.SetMode(f, mode) + m.mu.Unlock() + + return nil +} + +func (m *MemMapFs) Chtimes(name string, atime time.Time, mtime time.Time) error { + name = normalizePath(name) + + m.mu.RLock() + f, ok := m.getData()[name] + m.mu.RUnlock() + if !ok { + return &os.PathError{Op: "chtimes", Path: name, Err: ErrFileNotFound} + } + + m.mu.Lock() + mem.SetModTime(f, mtime) + m.mu.Unlock() + + return nil +} + +func (m *MemMapFs) List() { + for _, x := range m.data { + y := mem.FileInfo{FileData: x} + fmt.Println(x.Name(), y.Size()) + } +} + +// func debugMemMapList(fs Fs) { +// if x, ok := fs.(*MemMapFs); ok { +// x.List() +// } +// } diff --git a/vendor/github.com/spf13/afero/os.go b/vendor/github.com/spf13/afero/os.go new file mode 100644 index 0000000000000000000000000000000000000000..13cc1b84c933f9bbc405a1104ece91cd295d31ed --- /dev/null +++ b/vendor/github.com/spf13/afero/os.go @@ -0,0 +1,101 @@ +// Copyright © 2014 Steve Francia <spf@spf13.com>. +// Copyright 2013 tsuru authors. All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package afero + +import ( + "os" + "time" +) + +var _ Lstater = (*OsFs)(nil) + +// OsFs is a Fs implementation that uses functions provided by the os package. +// +// For details in any method, check the documentation of the os package +// (http://golang.org/pkg/os/). 
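Because OsFs and MemMapFs share the same Fs interface, production code can run against the OS while tests swap in memory; a hedged sketch using helpers defined elsewhere in this diff (TempDir, WriteFile, ReadDir):

```go
package main

import (
	"fmt"
	"path/filepath"

	"github.com/spf13/afero"
)

func main() {
	// OsFs delegates straight to the os package; swap in
	// afero.NewMemMapFs() here and the rest runs unchanged.
	var fs afero.Fs = afero.NewOsFs()

	tmp, err := afero.TempDir(fs, "", "afero-demo")
	if err != nil {
		panic(err)
	}
	defer fs.RemoveAll(tmp)

	if err := afero.WriteFile(fs, filepath.Join(tmp, "note.txt"), []byte("hello"), 0644); err != nil {
		panic(err)
	}

	infos, _ := afero.ReadDir(fs, tmp)
	for _, fi := range infos {
		fmt.Println(fi.Name(), fi.Size())
	}
}
```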
+type OsFs struct{} + +func NewOsFs() Fs { + return &OsFs{} +} + +func (OsFs) Name() string { return "OsFs" } + +func (OsFs) Create(name string) (File, error) { + f, e := os.Create(name) + if f == nil { + // while this looks strange, we need to return a bare nil (of type nil) not + // a nil value of type *os.File or nil won't be nil + return nil, e + } + return f, e +} + +func (OsFs) Mkdir(name string, perm os.FileMode) error { + return os.Mkdir(name, perm) +} + +func (OsFs) MkdirAll(path string, perm os.FileMode) error { + return os.MkdirAll(path, perm) +} + +func (OsFs) Open(name string) (File, error) { + f, e := os.Open(name) + if f == nil { + // while this looks strange, we need to return a bare nil (of type nil) not + // a nil value of type *os.File or nil won't be nil + return nil, e + } + return f, e +} + +func (OsFs) OpenFile(name string, flag int, perm os.FileMode) (File, error) { + f, e := os.OpenFile(name, flag, perm) + if f == nil { + // while this looks strange, we need to return a bare nil (of type nil) not + // a nil value of type *os.File or nil won't be nil + return nil, e + } + return f, e +} + +func (OsFs) Remove(name string) error { + return os.Remove(name) +} + +func (OsFs) RemoveAll(path string) error { + return os.RemoveAll(path) +} + +func (OsFs) Rename(oldname, newname string) error { + return os.Rename(oldname, newname) +} + +func (OsFs) Stat(name string) (os.FileInfo, error) { + return os.Stat(name) +} + +func (OsFs) Chmod(name string, mode os.FileMode) error { + return os.Chmod(name, mode) +} + +func (OsFs) Chtimes(name string, atime time.Time, mtime time.Time) error { + return os.Chtimes(name, atime, mtime) +} + +func (OsFs) LstatIfPossible(name string) (os.FileInfo, bool, error) { + fi, err := os.Lstat(name) + return fi, true, err +} diff --git a/vendor/github.com/spf13/afero/path.go b/vendor/github.com/spf13/afero/path.go new file mode 100644 index 0000000000000000000000000000000000000000..18f60a0f6b69c66f6d7d3be9d6df1788aedd2bab --- /dev/null +++ b/vendor/github.com/spf13/afero/path.go @@ -0,0 +1,106 @@ +// Copyright ©2015 The Go Authors +// Copyright ©2015 Steve Francia <spf@spf13.com> +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package afero + +import ( + "os" + "path/filepath" + "sort" +) + +// readDirNames reads the directory named by dirname and returns +// a sorted list of directory entries. 
+// adapted from https://golang.org/src/path/filepath/path.go +func readDirNames(fs Fs, dirname string) ([]string, error) { + f, err := fs.Open(dirname) + if err != nil { + return nil, err + } + names, err := f.Readdirnames(-1) + f.Close() + if err != nil { + return nil, err + } + sort.Strings(names) + return names, nil +} + +// walk recursively descends path, calling walkFn +// adapted from https://golang.org/src/path/filepath/path.go +func walk(fs Fs, path string, info os.FileInfo, walkFn filepath.WalkFunc) error { + err := walkFn(path, info, nil) + if err != nil { + if info.IsDir() && err == filepath.SkipDir { + return nil + } + return err + } + + if !info.IsDir() { + return nil + } + + names, err := readDirNames(fs, path) + if err != nil { + return walkFn(path, info, err) + } + + for _, name := range names { + filename := filepath.Join(path, name) + fileInfo, err := lstatIfPossible(fs, filename) + if err != nil { + if err := walkFn(filename, fileInfo, err); err != nil && err != filepath.SkipDir { + return err + } + } else { + err = walk(fs, filename, fileInfo, walkFn) + if err != nil { + if !fileInfo.IsDir() || err != filepath.SkipDir { + return err + } + } + } + } + return nil +} + +// if the filesystem supports it, use Lstat, else use fs.Stat +func lstatIfPossible(fs Fs, path string) (os.FileInfo, error) { + if lfs, ok := fs.(Lstater); ok { + fi, _, err := lfs.LstatIfPossible(path) + return fi, err + } + return fs.Stat(path) +} + +// Walk walks the file tree rooted at root, calling walkFn for each file or +// directory in the tree, including root. All errors that arise visiting files +// and directories are filtered by walkFn. The files are walked in lexical +// order, which makes the output deterministic but means that for very +// large directories Walk can be inefficient. +// Walk does not follow symbolic links. 
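As an illustration of the Walk contract above (lexical order, errors filtered through walkFn), a minimal sketch against an in-memory Fs:

```go
package main

import (
	"fmt"
	"os"

	"github.com/spf13/afero"
)

func main() {
	fs := afero.NewMemMapFs()
	afero.WriteFile(fs, "/a/b/one.txt", []byte("1"), 0644)
	afero.WriteFile(fs, "/a/two.txt", []byte("2"), 0644)

	// Visits /, /a, /a/b, /a/b/one.txt, /a/two.txt in lexical order;
	// returning filepath.SkipDir from the callback would prune a subtree.
	err := afero.Walk(fs, "/", func(path string, info os.FileInfo, err error) error {
		if err != nil {
			return err
		}
		fmt.Println(path, info.IsDir())
		return nil
	})
	if err != nil {
		panic(err)
	}
}
```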
+ +func (a Afero) Walk(root string, walkFn filepath.WalkFunc) error { + return Walk(a.Fs, root, walkFn) +} + +func Walk(fs Fs, root string, walkFn filepath.WalkFunc) error { + info, err := lstatIfPossible(fs, root) + if err != nil { + return walkFn(root, nil, err) + } + return walk(fs, root, info, walkFn) +} diff --git a/vendor/github.com/spf13/afero/readonlyfs.go b/vendor/github.com/spf13/afero/readonlyfs.go new file mode 100644 index 0000000000000000000000000000000000000000..c6376ec373aaa9f6a8cf092733fd41131765dba9 --- /dev/null +++ b/vendor/github.com/spf13/afero/readonlyfs.go @@ -0,0 +1,80 @@ +package afero + +import ( + "os" + "syscall" + "time" +) + +var _ Lstater = (*ReadOnlyFs)(nil) + +type ReadOnlyFs struct { + source Fs +} + +func NewReadOnlyFs(source Fs) Fs { + return &ReadOnlyFs{source: source} +} + +func (r *ReadOnlyFs) ReadDir(name string) ([]os.FileInfo, error) { + return ReadDir(r.source, name) +} + +func (r *ReadOnlyFs) Chtimes(n string, a, m time.Time) error { + return syscall.EPERM +} + +func (r *ReadOnlyFs) Chmod(n string, m os.FileMode) error { + return syscall.EPERM +} + +func (r *ReadOnlyFs) Name() string { + return "ReadOnlyFilter" +} + +func (r *ReadOnlyFs) Stat(name string) (os.FileInfo, error) { + return r.source.Stat(name) +} + +func (r *ReadOnlyFs) LstatIfPossible(name string) (os.FileInfo, bool, error) { + if lsf, ok := r.source.(Lstater); ok { + return lsf.LstatIfPossible(name) + } + fi, err := r.Stat(name) + return fi, false, err +} + +func (r *ReadOnlyFs) Rename(o, n string) error { + return syscall.EPERM +} + +func (r *ReadOnlyFs) RemoveAll(p string) error { + return syscall.EPERM +} + +func (r *ReadOnlyFs) Remove(n string) error { + return syscall.EPERM +} + +func (r *ReadOnlyFs) OpenFile(name string, flag int, perm os.FileMode) (File, error) { + if flag&(os.O_WRONLY|syscall.O_RDWR|os.O_APPEND|os.O_CREATE|os.O_TRUNC) != 0 { + return nil, syscall.EPERM + } + return r.source.OpenFile(name, flag, perm) +} + +func (r *ReadOnlyFs) Open(n string) (File, error) { + return r.source.Open(n) +} + +func (r *ReadOnlyFs) Mkdir(n string, p os.FileMode) error { + return syscall.EPERM +} + +func (r *ReadOnlyFs) MkdirAll(n string, p os.FileMode) error { + return syscall.EPERM +} + +func (r *ReadOnlyFs) Create(n string) (File, error) { + return nil, syscall.EPERM +} diff --git a/vendor/github.com/spf13/afero/regexpfs.go b/vendor/github.com/spf13/afero/regexpfs.go new file mode 100644 index 0000000000000000000000000000000000000000..9d92dbc051ff5591e312ffd09914f914590f8687 --- /dev/null +++ b/vendor/github.com/spf13/afero/regexpfs.go @@ -0,0 +1,214 @@ +package afero + +import ( + "os" + "regexp" + "syscall" + "time" +) + +// The RegexpFs filters files (not directories) by regular expression. Only +// files matching the given regexp will be allowed, all others get a ENOENT error ( +// "No such file or directory"). 
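
A short sketch of that filtering contract; the file names are arbitrary, and `regexp` and `syscall` imports are assumed.

```go
// Only names ending in .txt are visible through the wrapper.
fs := afero.NewRegexpFs(afero.NewMemMapFs(), regexp.MustCompile(`\.txt$`))

if _, err := fs.Create("/notes.txt"); err != nil {
	// not reached: the name matches the pattern
}
if _, err := fs.Create("/image.png"); err == syscall.ENOENT {
	// rejected: the name does not match the pattern
}
```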
+type RegexpFs struct {
+	re     *regexp.Regexp
+	source Fs
+}
+
+func NewRegexpFs(source Fs, re *regexp.Regexp) Fs {
+	return &RegexpFs{source: source, re: re}
+}
+
+type RegexpFile struct {
+	f  File
+	re *regexp.Regexp
+}
+
+// matchesName returns nil if name matches the filter regexp (or if no regexp
+// is set), and syscall.ENOENT otherwise.
+func (r *RegexpFs) matchesName(name string) error {
+	if r.re == nil {
+		return nil
+	}
+	if r.re.MatchString(name) {
+		return nil
+	}
+	return syscall.ENOENT
+}
+
+// dirOrMatches lets directories through unconditionally; files must match.
+func (r *RegexpFs) dirOrMatches(name string) error {
+	dir, err := IsDir(r.source, name)
+	if err != nil {
+		return err
+	}
+	if dir {
+		return nil
+	}
+	return r.matchesName(name)
+}
+
+func (r *RegexpFs) Chtimes(name string, a, m time.Time) error {
+	if err := r.dirOrMatches(name); err != nil {
+		return err
+	}
+	return r.source.Chtimes(name, a, m)
+}
+
+func (r *RegexpFs) Chmod(name string, mode os.FileMode) error {
+	if err := r.dirOrMatches(name); err != nil {
+		return err
+	}
+	return r.source.Chmod(name, mode)
+}
+
+func (r *RegexpFs) Name() string {
+	return "RegexpFs"
+}
+
+func (r *RegexpFs) Stat(name string) (os.FileInfo, error) {
+	if err := r.dirOrMatches(name); err != nil {
+		return nil, err
+	}
+	return r.source.Stat(name)
+}
+
+func (r *RegexpFs) Rename(oldname, newname string) error {
+	dir, err := IsDir(r.source, oldname)
+	if err != nil {
+		return err
+	}
+	if dir {
+		return nil
+	}
+	if err := r.matchesName(oldname); err != nil {
+		return err
+	}
+	if err := r.matchesName(newname); err != nil {
+		return err
+	}
+	return r.source.Rename(oldname, newname)
+}
+
+func (r *RegexpFs) RemoveAll(p string) error {
+	dir, err := IsDir(r.source, p)
+	if err != nil {
+		return err
+	}
+	if !dir {
+		if err := r.matchesName(p); err != nil {
+			return err
+		}
+	}
+	return r.source.RemoveAll(p)
+}
+
+func (r *RegexpFs) Remove(name string) error {
+	if err := r.dirOrMatches(name); err != nil {
+		return err
+	}
+	return r.source.Remove(name)
+}
+
+func (r *RegexpFs) OpenFile(name string, flag int, perm os.FileMode) (File, error) {
+	if err := r.dirOrMatches(name); err != nil {
+		return nil, err
+	}
+	return r.source.OpenFile(name, flag, perm)
+}
+
+func (r *RegexpFs) Open(name string) (File, error) {
+	dir, err := IsDir(r.source, name)
+	if err != nil {
+		return nil, err
+	}
+	if !dir {
+		if err := r.matchesName(name); err != nil {
+			return nil, err
+		}
+	}
+	f, err := r.source.Open(name)
+	// Propagate any error from the source filesystem rather than wrapping a
+	// nil file.
+	if err != nil {
+		return nil, err
+	}
+	return &RegexpFile{f: f, re: r.re}, nil
+}
+
+func (r *RegexpFs) Mkdir(n string, p os.FileMode) error {
+	return r.source.Mkdir(n, p)
+}
+
+func (r *RegexpFs) MkdirAll(n string, p os.FileMode) error {
+	return r.source.MkdirAll(n, p)
+}
+
+func (r *RegexpFs) Create(name string) (File, error) {
+	if err := r.matchesName(name); err != nil {
+		return nil, err
+	}
+	return r.source.Create(name)
+}
+
+func (f *RegexpFile) Close() error {
+	return f.f.Close()
+}
+
+func (f *RegexpFile) Read(s []byte) (int, error) {
+	return f.f.Read(s)
+}
+
+func (f *RegexpFile) ReadAt(s []byte, o int64) (int, error) {
+	return f.f.ReadAt(s, o)
+}
+
+func (f *RegexpFile) Seek(o int64, w int) (int64, error) {
+	return f.f.Seek(o, w)
+}
+
+func (f *RegexpFile) Write(s []byte) (int, error) {
+	return f.f.Write(s)
+}
+
+func (f *RegexpFile) WriteAt(s []byte, o int64) (int, error) {
+	return f.f.WriteAt(s, o)
+}
+
+func (f *RegexpFile) Name() string {
+	return f.f.Name()
+}
+
+func (f *RegexpFile) Readdir(c int) (fi []os.FileInfo, err error) {
+	var rfi []os.FileInfo
+	rfi, err = f.f.Readdir(c)
+	if err != nil {
+		return nil, err
+	}
+	for _, i := range rfi {
+		if i.IsDir() || f.re.MatchString(i.Name()) {
+			fi = append(fi, i)
+		}
+	}
+	return fi,
nil +} + +func (f *RegexpFile) Readdirnames(c int) (n []string, err error) { + fi, err := f.Readdir(c) + if err != nil { + return nil, err + } + for _, s := range fi { + n = append(n, s.Name()) + } + return n, nil +} + +func (f *RegexpFile) Stat() (os.FileInfo, error) { + return f.f.Stat() +} + +func (f *RegexpFile) Sync() error { + return f.f.Sync() +} + +func (f *RegexpFile) Truncate(s int64) error { + return f.f.Truncate(s) +} + +func (f *RegexpFile) WriteString(s string) (int, error) { + return f.f.WriteString(s) +} diff --git a/vendor/github.com/spf13/afero/unionFile.go b/vendor/github.com/spf13/afero/unionFile.go new file mode 100644 index 0000000000000000000000000000000000000000..eda96312df6bb8ebe107d5c5dca709f50c6f1de8 --- /dev/null +++ b/vendor/github.com/spf13/afero/unionFile.go @@ -0,0 +1,320 @@ +package afero + +import ( + "io" + "os" + "path/filepath" + "syscall" +) + +// The UnionFile implements the afero.File interface and will be returned +// when reading a directory present at least in the overlay or opening a file +// for writing. +// +// The calls to +// Readdir() and Readdirnames() merge the file os.FileInfo / names from the +// base and the overlay - for files present in both layers, only those +// from the overlay will be used. +// +// When opening files for writing (Create() / OpenFile() with the right flags) +// the operations will be done in both layers, starting with the overlay. A +// successful read in the overlay will move the cursor position in the base layer +// by the number of bytes read. +type UnionFile struct { + Base File + Layer File + Merger DirsMerger + off int + files []os.FileInfo +} + +func (f *UnionFile) Close() error { + // first close base, so we have a newer timestamp in the overlay. If we'd close + // the overlay first, we'd get a cacheStale the next time we access this file + // -> cache would be useless ;-) + if f.Base != nil { + f.Base.Close() + } + if f.Layer != nil { + return f.Layer.Close() + } + return BADFD +} + +func (f *UnionFile) Read(s []byte) (int, error) { + if f.Layer != nil { + n, err := f.Layer.Read(s) + if (err == nil || err == io.EOF) && f.Base != nil { + // advance the file position also in the base file, the next + // call may be a write at this position (or a seek with SEEK_CUR) + if _, seekErr := f.Base.Seek(int64(n), os.SEEK_CUR); seekErr != nil { + // only overwrite err in case the seek fails: we need to + // report an eventual io.EOF to the caller + err = seekErr + } + } + return n, err + } + if f.Base != nil { + return f.Base.Read(s) + } + return 0, BADFD +} + +func (f *UnionFile) ReadAt(s []byte, o int64) (int, error) { + if f.Layer != nil { + n, err := f.Layer.ReadAt(s, o) + if (err == nil || err == io.EOF) && f.Base != nil { + _, err = f.Base.Seek(o+int64(n), os.SEEK_SET) + } + return n, err + } + if f.Base != nil { + return f.Base.ReadAt(s, o) + } + return 0, BADFD +} + +func (f *UnionFile) Seek(o int64, w int) (pos int64, err error) { + if f.Layer != nil { + pos, err = f.Layer.Seek(o, w) + if (err == nil || err == io.EOF) && f.Base != nil { + _, err = f.Base.Seek(o, w) + } + return pos, err + } + if f.Base != nil { + return f.Base.Seek(o, w) + } + return 0, BADFD +} + +func (f *UnionFile) Write(s []byte) (n int, err error) { + if f.Layer != nil { + n, err = f.Layer.Write(s) + if err == nil && f.Base != nil { // hmm, do we have fixed size files where a write may hit the EOF mark? 
+ _, err = f.Base.Write(s) + } + return n, err + } + if f.Base != nil { + return f.Base.Write(s) + } + return 0, BADFD +} + +func (f *UnionFile) WriteAt(s []byte, o int64) (n int, err error) { + if f.Layer != nil { + n, err = f.Layer.WriteAt(s, o) + if err == nil && f.Base != nil { + _, err = f.Base.WriteAt(s, o) + } + return n, err + } + if f.Base != nil { + return f.Base.WriteAt(s, o) + } + return 0, BADFD +} + +func (f *UnionFile) Name() string { + if f.Layer != nil { + return f.Layer.Name() + } + return f.Base.Name() +} + +// DirsMerger is how UnionFile weaves two directories together. +// It takes the FileInfo slices from the layer and the base and returns a +// single view. +type DirsMerger func(lofi, bofi []os.FileInfo) ([]os.FileInfo, error) + +var defaultUnionMergeDirsFn = func(lofi, bofi []os.FileInfo) ([]os.FileInfo, error) { + var files = make(map[string]os.FileInfo) + + for _, fi := range lofi { + files[fi.Name()] = fi + } + + for _, fi := range bofi { + if _, exists := files[fi.Name()]; !exists { + files[fi.Name()] = fi + } + } + + rfi := make([]os.FileInfo, len(files)) + + i := 0 + for _, fi := range files { + rfi[i] = fi + i++ + } + + return rfi, nil + +} + +// Readdir will weave the two directories together and +// return a single view of the overlayed directories. +// At the end of the directory view, the error is io.EOF if c > 0. +func (f *UnionFile) Readdir(c int) (ofi []os.FileInfo, err error) { + var merge DirsMerger = f.Merger + if merge == nil { + merge = defaultUnionMergeDirsFn + } + + if f.off == 0 { + var lfi []os.FileInfo + if f.Layer != nil { + lfi, err = f.Layer.Readdir(-1) + if err != nil { + return nil, err + } + } + + var bfi []os.FileInfo + if f.Base != nil { + bfi, err = f.Base.Readdir(-1) + if err != nil { + return nil, err + } + + } + merged, err := merge(lfi, bfi) + if err != nil { + return nil, err + } + f.files = append(f.files, merged...) 
+	}
+
+	if c <= 0 && len(f.files) == 0 {
+		return f.files, nil
+	}
+
+	if f.off >= len(f.files) {
+		return nil, io.EOF
+	}
+
+	if c <= 0 {
+		return f.files[f.off:], nil
+	}
+
+	// Clamp c against the remaining (not the total) entries so the slice
+	// bounds below stay valid on repeated calls.
+	if c > len(f.files)-f.off {
+		c = len(f.files) - f.off
+	}
+
+	defer func() { f.off += c }()
+	return f.files[f.off : f.off+c], nil
+}
+
+func (f *UnionFile) Readdirnames(c int) ([]string, error) {
+	rfi, err := f.Readdir(c)
+	if err != nil {
+		return nil, err
+	}
+	var names []string
+	for _, fi := range rfi {
+		names = append(names, fi.Name())
+	}
+	return names, nil
+}
+
+func (f *UnionFile) Stat() (os.FileInfo, error) {
+	if f.Layer != nil {
+		return f.Layer.Stat()
+	}
+	if f.Base != nil {
+		return f.Base.Stat()
+	}
+	return nil, BADFD
+}
+
+func (f *UnionFile) Sync() (err error) {
+	if f.Layer != nil {
+		err = f.Layer.Sync()
+		if err == nil && f.Base != nil {
+			err = f.Base.Sync()
+		}
+		return err
+	}
+	if f.Base != nil {
+		return f.Base.Sync()
+	}
+	return BADFD
+}
+
+func (f *UnionFile) Truncate(s int64) (err error) {
+	if f.Layer != nil {
+		err = f.Layer.Truncate(s)
+		if err == nil && f.Base != nil {
+			err = f.Base.Truncate(s)
+		}
+		return err
+	}
+	if f.Base != nil {
+		return f.Base.Truncate(s)
+	}
+	return BADFD
+}
+
+func (f *UnionFile) WriteString(s string) (n int, err error) {
+	if f.Layer != nil {
+		n, err = f.Layer.WriteString(s)
+		if err == nil && f.Base != nil {
+			_, err = f.Base.WriteString(s)
+		}
+		return n, err
+	}
+	if f.Base != nil {
+		return f.Base.WriteString(s)
+	}
+	return 0, BADFD
+}
+
+func copyToLayer(base Fs, layer Fs, name string) error {
+	bfh, err := base.Open(name)
+	if err != nil {
+		return err
+	}
+	defer bfh.Close()
+
+	// First make sure the directory exists
+	exists, err := Exists(layer, filepath.Dir(name))
+	if err != nil {
+		return err
+	}
+	if !exists {
+		err = layer.MkdirAll(filepath.Dir(name), 0777) // FIXME?
+		if err != nil {
+			return err
+		}
+	}
+
+	// Create the file on the overlay
+	lfh, err := layer.Create(name)
+	if err != nil {
+		return err
+	}
+	n, err := io.Copy(lfh, bfh)
+	if err != nil {
+		// If anything fails, clean up the file
+		layer.Remove(name)
+		lfh.Close()
+		return err
+	}
+
+	bfi, err := bfh.Stat()
+	if err != nil || bfi.Size() != n {
+		layer.Remove(name)
+		lfh.Close()
+		return syscall.EIO
+	}
+
+	err = lfh.Close()
+	if err != nil {
+		layer.Remove(name)
+		lfh.Close()
+		return err
+	}
+	return layer.Chtimes(name, bfi.ModTime(), bfi.ModTime())
+}
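
For context before the next file: the UnionFile and copyToLayer machinery above underpins afero's copy-on-write union filesystem. A rough usage sketch follows; NewCopyOnWriteFs is defined in copyOnWriteFs.go, which is not part of this excerpt.

```go
base := afero.NewReadOnlyFs(afero.NewOsFs()) // on-disk files, write-protected
layer := afero.NewMemMapFs()                 // scratch layer that absorbs writes
ufs := afero.NewCopyOnWriteFs(base, layer)

// Reads fall through to base. Opening a base file for writing first copies it
// up into layer (via copyToLayer), so the files on disk are never modified.
if f, err := ufs.OpenFile("/etc/hosts", os.O_RDWR, 0644); err == nil {
	defer f.Close()
	f.WriteString("# scratch edit, kept in memory only")
}
```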
diff --git a/vendor/github.com/spf13/afero/util.go b/vendor/github.com/spf13/afero/util.go
new file mode 100644
index 0000000000000000000000000000000000000000..4f253f481edd15798ff64305d002329c11da2e90
--- /dev/null
+++ b/vendor/github.com/spf13/afero/util.go
@@ -0,0 +1,330 @@
+// Copyright ©2015 Steve Francia <spf@spf13.com>
+// Portions Copyright ©2015 The Hugo Authors
+// Portions Copyright 2016-present Bjørn Erik Pedersen <bjorn.erik.pedersen@gmail.com>
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package afero
+
+import (
+	"bytes"
+	"fmt"
+	"io"
+	"os"
+	"path/filepath"
+	"strings"
+	"unicode"
+
+	"golang.org/x/text/transform"
+	"golang.org/x/text/unicode/norm"
+)
+
+// FilePathSeparator is the filepath separator defined by os.Separator.
+const FilePathSeparator = string(filepath.Separator)
+
+// WriteReader takes a reader and a path and writes the content.
+func (a Afero) WriteReader(path string, r io.Reader) (err error) {
+	return WriteReader(a.Fs, path, r)
+}
+
+func WriteReader(fs Fs, path string, r io.Reader) (err error) {
+	dir, _ := filepath.Split(path)
+	ospath := filepath.FromSlash(dir)
+
+	if ospath != "" {
+		err = fs.MkdirAll(ospath, 0777) // rwx, rwx, rwx
+		if err != nil {
+			if err != os.ErrExist {
+				return err
+			}
+		}
+	}
+
+	file, err := fs.Create(path)
+	if err != nil {
+		return
+	}
+	defer file.Close()
+
+	_, err = io.Copy(file, r)
+	return
+}
+
+// SafeWriteReader is the same as WriteReader, but returns an error if the
+// file already exists.
+func (a Afero) SafeWriteReader(path string, r io.Reader) (err error) {
+	return SafeWriteReader(a.Fs, path, r)
+}
+
+func SafeWriteReader(fs Fs, path string, r io.Reader) (err error) {
+	dir, _ := filepath.Split(path)
+	ospath := filepath.FromSlash(dir)
+
+	if ospath != "" {
+		err = fs.MkdirAll(ospath, 0777) // rwx, rwx, rwx
+		if err != nil {
+			return
+		}
+	}
+
+	exists, err := Exists(fs, path)
+	if err != nil {
+		return
+	}
+	if exists {
+		return fmt.Errorf("%v already exists", path)
+	}
+
+	file, err := fs.Create(path)
+	if err != nil {
+		return
+	}
+	defer file.Close()
+
+	_, err = io.Copy(file, r)
+	return
+}
+
+func (a Afero) GetTempDir(subPath string) string {
+	return GetTempDir(a.Fs, subPath)
+}
+
+// GetTempDir returns the default temp directory with a trailing slash.
+// If subPath is not empty, it is created recursively with mode 777 (rwx rwx rwx).
+func GetTempDir(fs Fs, subPath string) string {
+	addSlash := func(p string) string {
+		if FilePathSeparator != p[len(p)-1:] {
+			p = p + FilePathSeparator
+		}
+		return p
+	}
+	dir := addSlash(os.TempDir())
+
+	if subPath != "" {
+		// preserve windows backslash :-(
+		if FilePathSeparator == "\\" {
+			subPath = strings.Replace(subPath, "\\", "____", -1)
+		}
+		dir = dir + UnicodeSanitize(subPath)
+		if FilePathSeparator == "\\" {
+			dir = strings.Replace(dir, "____", "\\", -1)
+		}
+
+		if exists, _ := Exists(fs, dir); exists {
+			return addSlash(dir)
+		}
+
+		err := fs.MkdirAll(dir, 0777)
+		if err != nil {
+			panic(err)
+		}
+		dir = addSlash(dir)
+	}
+	return dir
+}
+
+// UnicodeSanitize rewrites a string to remove non-standard path characters.
+func UnicodeSanitize(s string) string {
+	source := []rune(s)
+	target := make([]rune, 0, len(source))
+
+	for _, r := range source {
+		if unicode.IsLetter(r) ||
+			unicode.IsDigit(r) ||
+			unicode.IsMark(r) ||
+			r == '.' ||
+			r == '/' ||
+			r == '\\' ||
+			r == '_' ||
+			r == '-' ||
+			r == '%' ||
+			r == ' ' ||
+			r == '#' {
+			target = append(target, r)
+		}
+	}
+
+	return string(target)
+}
+
+// NeuterAccents transforms characters with accents into plain forms.
+func NeuterAccents(s string) string {
+	t := transform.Chain(norm.NFD, transform.RemoveFunc(isMn), norm.NFC)
+	result, _, _ := transform.String(t, string(s))
+
+	return result
+}
+
+func isMn(r rune) bool {
+	return unicode.Is(unicode.Mn, r) // Mn: nonspacing marks
+}
+
+func (a Afero) FileContainsBytes(filename string, subslice []byte) (bool, error) {
+	return FileContainsBytes(a.Fs, filename, subslice)
+}
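
A small sketch tying the helpers above together; the paths and content are arbitrary, and a strings import is assumed.

```go
fs := afero.NewMemMapFs()

// WriteReader creates missing parent directories before writing.
_ = afero.WriteReader(fs, "/reports/2019/q1.txt", strings.NewReader("draft"))

// SafeWriteReader refuses to overwrite an existing file.
err := afero.SafeWriteReader(fs, "/reports/2019/q1.txt", strings.NewReader("v2"))
// err: "/reports/2019/q1.txt already exists"

// GetTempDir sanitizes subPath and ensures the directory exists,
// e.g. "/tmp/scratch/" on Linux.
tmp := afero.GetTempDir(fs, "scratch")
_ = tmp
```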
+// FileContainsBytes checks whether a file contains the specified byte slice.
+func FileContainsBytes(fs Fs, filename string, subslice []byte) (bool, error) {
+	f, err := fs.Open(filename)
+	if err != nil {
+		return false, err
+	}
+	defer f.Close()
+
+	return readerContainsAny(f, subslice), nil
+}
+
+func (a Afero) FileContainsAnyBytes(filename string, subslices [][]byte) (bool, error) {
+	return FileContainsAnyBytes(a.Fs, filename, subslices)
+}
+
+// FileContainsAnyBytes checks whether a file contains any of the specified byte slices.
+func FileContainsAnyBytes(fs Fs, filename string, subslices [][]byte) (bool, error) {
+	f, err := fs.Open(filename)
+	if err != nil {
+		return false, err
+	}
+	defer f.Close()
+
+	return readerContainsAny(f, subslices...), nil
+}
+
+// readerContainsAny reports whether any of the subslices is within r.
+func readerContainsAny(r io.Reader, subslices ...[]byte) bool {
+	if r == nil || len(subslices) == 0 {
+		return false
+	}
+
+	largestSlice := 0
+
+	for _, sl := range subslices {
+		if len(sl) > largestSlice {
+			largestSlice = len(sl)
+		}
+	}
+
+	if largestSlice == 0 {
+		return false
+	}
+
+	bufflen := largestSlice * 4
+	halflen := bufflen / 2
+	buff := make([]byte, bufflen)
+	var err error
+	var n, i int
+
+	for {
+		i++
+		if i == 1 {
+			n, err = io.ReadAtLeast(r, buff[:halflen], halflen)
+		} else {
+			if i != 2 {
+				// shift left to catch overlapping matches
+				copy(buff[:], buff[halflen:])
+			}
+			n, err = io.ReadAtLeast(r, buff[halflen:], halflen)
+		}
+
+		if n > 0 {
+			for _, sl := range subslices {
+				if bytes.Contains(buff, sl) {
+					return true
+				}
+			}
+		}
+
+		if err != nil {
+			break
+		}
+	}
+	return false
+}
+
+func (a Afero) DirExists(path string) (bool, error) {
+	return DirExists(a.Fs, path)
+}
+
+// DirExists checks if a path exists and is a directory.
+func DirExists(fs Fs, path string) (bool, error) {
+	fi, err := fs.Stat(path)
+	if err == nil && fi.IsDir() {
+		return true, nil
+	}
+	if os.IsNotExist(err) {
+		return false, nil
+	}
+	return false, err
+}
+
+func (a Afero) IsDir(path string) (bool, error) {
+	return IsDir(a.Fs, path)
+}
+
+// IsDir checks if a given path is a directory.
+func IsDir(fs Fs, path string) (bool, error) {
+	fi, err := fs.Stat(path)
+	if err != nil {
+		return false, err
+	}
+	return fi.IsDir(), nil
+}
+
+func (a Afero) IsEmpty(path string) (bool, error) {
+	return IsEmpty(a.Fs, path)
+}
+
+// IsEmpty checks if a given file or directory is empty.
+func IsEmpty(fs Fs, path string) (bool, error) {
+	if b, _ := Exists(fs, path); !b {
+		return false, fmt.Errorf("%q path does not exist", path)
+	}
+	fi, err := fs.Stat(path)
+	if err != nil {
+		return false, err
+	}
+	if fi.IsDir() {
+		f, err := fs.Open(path)
+		if err != nil {
+			return false, err
+		}
+		defer f.Close()
+		list, err := f.Readdir(-1)
+		if err != nil {
+			return false, err
+		}
+		return len(list) == 0, nil
+	}
+	return fi.Size() == 0, nil
+}
+
+func (a Afero) Exists(path string) (bool, error) {
+	return Exists(a.Fs, path)
+}
+
+// Exists checks whether a file or directory exists.
+func Exists(fs Fs, path string) (bool, error) { + _, err := fs.Stat(path) + if err == nil { + return true, nil + } + if os.IsNotExist(err) { + return false, nil + } + return false, err +} + +func FullBaseFsPath(basePathFs *BasePathFs, relativePath string) string { + combinedPath := filepath.Join(basePathFs.path, relativePath) + if parent, ok := basePathFs.source.(*BasePathFs); ok { + return FullBaseFsPath(parent, combinedPath) + } + + return combinedPath +} diff --git a/vendor/sigs.k8s.io/controller-tools/LICENSE b/vendor/sigs.k8s.io/controller-tools/LICENSE new file mode 100644 index 0000000000000000000000000000000000000000..8dada3edaf50dbc082c9a125058f25def75e625a --- /dev/null +++ b/vendor/sigs.k8s.io/controller-tools/LICENSE @@ -0,0 +1,201 @@ + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. 
For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. 
The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. 
+ + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "{}" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright {yyyy} {name of copyright owner} + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/vendor/sigs.k8s.io/controller-tools/cmd/controller-gen/main.go b/vendor/sigs.k8s.io/controller-tools/cmd/controller-gen/main.go new file mode 100644 index 0000000000000000000000000000000000000000..74c29d21d998c439cee7172fde487edf11cd8ba3 --- /dev/null +++ b/vendor/sigs.k8s.io/controller-tools/cmd/controller-gen/main.go @@ -0,0 +1,213 @@ +/* +Copyright 2018 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ +package main + +import ( + "fmt" + "log" + "os" + "path/filepath" + + "github.com/spf13/cobra" + crdgenerator "sigs.k8s.io/controller-tools/pkg/crd/generator" + "sigs.k8s.io/controller-tools/pkg/rbac" + "sigs.k8s.io/controller-tools/pkg/webhook" +) + +func main() { + rootCmd := &cobra.Command{ + Use: "controller-gen", + Short: "A reference implementation generation tool for Kubernetes APIs.", + Long: `A reference implementation generation tool for Kubernetes APIs.`, + Example: ` # Generate RBAC manifests for a project + controller-gen rbac + + # Generate CRD manifests for a project + controller-gen crd + + # Run all the generators for a given project + controller-gen all +`, + } + + rootCmd.AddCommand( + newRBACCmd(), + newCRDCmd(), + newWebhookCmd(), + newAllSubCmd(), + ) + + if err := rootCmd.Execute(); err != nil { + fmt.Println(err) + os.Exit(1) + } +} + +func newRBACCmd() *cobra.Command { + o := &rbac.ManifestOptions{} + o.SetDefaults() + + cmd := &cobra.Command{ + Use: "rbac", + Short: "Generates RBAC manifests", + Long: `Generate RBAC manifests from the RBAC annotations in Go source files. 
+Usage: +# controller-gen rbac [--name manager] [--input-dir input_dir] [--output-dir output_dir] +`, + Run: func(_ *cobra.Command, _ []string) { + if err := rbac.Generate(o); err != nil { + log.Fatal(err) + } + fmt.Printf("RBAC manifests generated under '%s' directory\n", o.OutputDir) + }, + } + + f := cmd.Flags() + f.StringVar(&o.Name, "name", o.Name, "name to be used as prefix in identifier for manifests") + f.StringVar(&o.ServiceAccount, "service-account", o.ServiceAccount, "service account to bind the role to") + f.StringVar(&o.Namespace, "service-account-namespace", o.Namespace, "namespace of the service account to bind the role to") + f.StringVar(&o.InputDir, "input-dir", o.InputDir, "input directory pointing to Go source files") + f.StringVar(&o.OutputDir, "output-dir", o.OutputDir, "output directory where generated manifests will be saved") + f.StringVar(&o.RoleFile, "role-file", o.RoleFile, "output file for the role manifest") + f.StringVar(&o.BindingFile, "binding-file", o.BindingFile, "output file for the role binding manifest") + + return cmd +} + +func newCRDCmd() *cobra.Command { + g := &crdgenerator.Generator{} + + cmd := &cobra.Command{ + Use: "crd", + Short: "Generates CRD manifests", + Long: `Generate CRD manifests from the Type definitions in Go source files. +Usage: +# controller-gen crd [--domain k8s.io] [--root-path input_dir] [--output-dir output_dir] +`, + Run: func(_ *cobra.Command, _ []string) { + if err := g.ValidateAndInitFields(); err != nil { + log.Fatal(err) + } + if err := g.Do(); err != nil { + log.Fatal(err) + } + fmt.Printf("CRD files generated, files can be found under path %s.\n", g.OutputDir) + }, + } + + f := cmd.Flags() + f.StringVar(&g.RootPath, "root-path", "", "working dir, must have PROJECT file under the path or parent path if domain not set") + f.StringVar(&g.OutputDir, "output-dir", "", "output directory, default to 'config/crds' under root path") + f.StringVar(&g.Domain, "domain", "", "domain of the resources, will try to fetch it from PROJECT file if not specified") + f.StringVar(&g.Namespace, "namespace", "", "CRD namespace, treat it as cluster scoped if not set") + f.BoolVar(&g.SkipMapValidation, "skip-map-validation", true, "if set to true, skip generating OpenAPI validation schema for map type in CRD.") + f.StringVar(&g.APIsPath, "apis-path", "pkg/apis", "the path to search for apis relative to the current directory") + f.StringVar(&g.APIsPkg, "apis-pkg", "", "the absolute Go pkg name for current project's api pkg.") + + return cmd +} + +func newAllSubCmd() *cobra.Command { + var ( + projectDir, namespace string + ) + + cmd := &cobra.Command{ + Use: "all", + Short: "runs all generators for a project", + Long: `Run all available generators for a given project +Usage: +# controller-gen all +`, + Run: func(_ *cobra.Command, _ []string) { + if projectDir == "" { + currDir, err := os.Getwd() + if err != nil { + log.Fatalf("project-dir missing, failed to use current directory: %v", err) + } + projectDir = currDir + } + crdGen := &crdgenerator.Generator{ + RootPath: projectDir, + OutputDir: filepath.Join(projectDir, "config", "crds"), + Namespace: namespace, + SkipMapValidation: true, + } + if err := crdGen.ValidateAndInitFields(); err != nil { + log.Fatal(err) + } + if err := crdGen.Do(); err != nil { + log.Fatal(err) + } + fmt.Printf("CRD manifests generated under '%s' \n", crdGen.OutputDir) + + // RBAC generation + rbacOptions := &rbac.ManifestOptions{ + InputDir: filepath.Join(projectDir, "pkg"), + OutputDir: filepath.Join(projectDir, 
"config", "rbac"), + Name: "manager", + } + if err := rbac.Generate(rbacOptions); err != nil { + log.Fatal(err) + } + fmt.Printf("RBAC manifests generated under '%s' \n", rbacOptions.OutputDir) + + o := &webhook.Options{ + WriterOptions: webhook.WriterOptions{ + InputDir: filepath.Join(projectDir, "pkg"), + OutputDir: filepath.Join(projectDir, "config", "webhook"), + PatchOutputDir: filepath.Join(projectDir, "config", "default"), + }, + } + o.SetDefaults() + if err := webhook.Generate(o); err != nil { + log.Fatal(err) + } + fmt.Printf("webhook manifests generated under '%s' directory\n", o.OutputDir) + }, + } + f := cmd.Flags() + f.StringVar(&projectDir, "project-dir", "", "project directory, it must have PROJECT file") + f.StringVar(&namespace, "namespace", "", "CRD namespace, treat it as cluster scoped if not set") + return cmd +} + +func newWebhookCmd() *cobra.Command { + o := &webhook.Options{} + o.SetDefaults() + + cmd := &cobra.Command{ + Use: "webhook", + Short: "Generates webhook related manifests", + Long: `Generate webhook related manifests from the webhook annotations in Go source files. +Usage: +# controller-gen webhook [--input-dir input_dir] [--output-dir output_dir] [--patch-output-dir patch-output_dir] +`, + Run: func(_ *cobra.Command, _ []string) { + if err := webhook.Generate(o); err != nil { + log.Fatal(err) + } + fmt.Printf("webhook manifests generated under '%s' directory\n", o.OutputDir) + }, + } + + f := cmd.Flags() + f.StringVar(&o.InputDir, "input-dir", o.InputDir, "input directory pointing to Go source files") + f.StringVar(&o.OutputDir, "output-dir", o.OutputDir, "output directory where generated manifests will be saved.") + f.StringVar(&o.PatchOutputDir, "patch-output-dir", o.PatchOutputDir, "output directory where generated kustomize patch will be saved.") + + return cmd +} diff --git a/vendor/sigs.k8s.io/controller-tools/pkg/crd/generator/generator.go b/vendor/sigs.k8s.io/controller-tools/pkg/crd/generator/generator.go new file mode 100644 index 0000000000000000000000000000000000000000..dfbf15278995b2e2411cbffe6497fb4dfe2a0515 --- /dev/null +++ b/vendor/sigs.k8s.io/controller-tools/pkg/crd/generator/generator.go @@ -0,0 +1,207 @@ +/* +Copyright 2018 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package generator + +import ( + "fmt" + "log" + "os" + "path" + "strings" + + "github.com/ghodss/yaml" + "github.com/spf13/afero" + extensionsv1beta1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1" + "k8s.io/gengo/args" + "k8s.io/gengo/types" + crdutil "sigs.k8s.io/controller-tools/pkg/crd/util" + "sigs.k8s.io/controller-tools/pkg/internal/codegen" + "sigs.k8s.io/controller-tools/pkg/internal/codegen/parse" + "sigs.k8s.io/controller-tools/pkg/util" +) + +// Generator generates CRD manifests from API resource definitions defined in Go source files. 
+type Generator struct {
+	RootPath          string
+	OutputDir         string
+	Domain            string
+	Namespace         string
+	SkipMapValidation bool
+
+	// OutFs is the filesystem to be used for writing out the result.
+	OutFs afero.Fs
+
+	// apisPkg is the absolute Go pkg name for current project's 'pkg/apis' pkg.
+	// This is needed to determine if a Type belongs to the project or is a referred Type.
+	apisPkg string
+
+	// APIsPath and APIsPkg allow customized generation for Go types existing
+	// under directories other than pkg/apis.
+	APIsPath string
+	APIsPkg  string
+}
+
+// ValidateAndInitFields validates and initializes the generator's fields.
+func (c *Generator) ValidateAndInitFields() error {
+	var err error
+
+	if c.OutFs == nil {
+		c.OutFs = afero.NewOsFs()
+	}
+
+	if len(c.RootPath) == 0 {
+		// Take the current path as root path if not specified.
+		c.RootPath, err = os.Getwd()
+		if err != nil {
+			return err
+		}
+	}
+
+	// Validate that the root path is under the go src path.
+	if !crdutil.IsUnderGoSrcPath(c.RootPath) {
+		return fmt.Errorf("command must be run from path under $GOPATH/src/<package>")
+	}
+
+	// If Domain is not explicitly specified,
+	// try to search for a PROJECT file as a basis.
+	if len(c.Domain) == 0 {
+		if !crdutil.PathHasProjectFile(c.RootPath) {
+			return fmt.Errorf("PROJECT file missing in dir %s", c.RootPath)
+		}
+		c.Domain = crdutil.GetDomainFromProject(c.RootPath)
+	}
+
+	err = c.setAPIsPkg()
+	if err != nil {
+		return err
+	}
+
+	// Init the output directory.
+	if c.OutputDir == "" {
+		c.OutputDir = path.Join(c.RootPath, "config/crds")
+	}
+
+	return nil
+}
+
+// Do manages CRD generation.
+func (c *Generator) Do() error {
+	arguments := args.Default()
+	b, err := arguments.NewBuilder()
+	if err != nil {
+		return fmt.Errorf("failed making a parser: %v", err)
+	}
+
+	// Switch working directory to the root path.
+	if err := os.Chdir(c.RootPath); err != nil {
+		return fmt.Errorf("failed switching working dir: %v", err)
+	}
+
+	if err := b.AddDirRecursive("./" + c.APIsPath); err != nil {
+		return fmt.Errorf("failed adding APIs directory to the parser: %v", err)
+	}
+	ctx, err := parse.NewContext(b)
+	if err != nil {
+		return fmt.Errorf("failed making a context: %v", err)
+	}
+
+	arguments.CustomArgs = &parse.Options{SkipMapValidation: c.SkipMapValidation}
+
+	// TODO: find an elegant way to fulfill the domain in APIs.
+	p := parse.NewAPIs(ctx, arguments, c.Domain, c.apisPkg)
+	crds := c.getCrds(p)
+
+	return c.writeCRDs(crds)
+}
+
+func (c *Generator) writeCRDs(crds map[string][]byte) error {
+	// Ensure the output dir exists.
+	if err := c.OutFs.MkdirAll(c.OutputDir, os.FileMode(0700)); err != nil {
+		return err
+	}
+
+	for file, crd := range crds {
+		outFile := path.Join(c.OutputDir, file)
+		if err := (&util.FileWriter{Fs: c.OutFs}).WriteFile(outFile, crd); err != nil {
+			return err
+		}
+	}
+	return nil
+}
+
+func getCRDFileName(resource *codegen.APIResource) string {
+	elems := []string{resource.Group, resource.Version, strings.ToLower(resource.Kind)}
+	return strings.Join(elems, "_") + ".yaml"
+}
+
+func (c *Generator) getCrds(p *parse.APIs) map[string][]byte {
+	crds := map[string]extensionsv1beta1.CustomResourceDefinition{}
+	for _, g := range p.APIs.Groups {
+		for _, v := range g.Versions {
+			for _, r := range v.Resources {
+				crd := r.CRD
+				// ignore types which do not belong to this project
+				if !c.belongsToAPIsPkg(r.Type) {
+					continue
+				}
+				if len(c.Namespace) > 0 {
+					crd.Namespace = c.Namespace
+				}
+				fileName := getCRDFileName(r)
+				crds[fileName] = crd
+			}
+		}
+	}
+
+	result := map[string][]byte{}
+	for file, crd := range crds {
+		b, err := yaml.Marshal(crd)
+		if err != nil {
+			log.Fatalf("Error: %v", err)
+		}
+		result[file] = b
+	}
+
+	return result
+}
+
+// belongsToAPIsPkg returns true if type t is defined under the pkg/apis pkg of
+// the current project.
+func (c *Generator) belongsToAPIsPkg(t *types.Type) bool {
+	return strings.HasPrefix(t.Name.Package, c.apisPkg)
+}
+
+func (c *Generator) setAPIsPkg() error {
+	var err error
+	if c.APIsPath == "" {
+		c.APIsPath = "pkg/apis"
+	}
+
+	c.apisPkg = c.APIsPkg
+	if c.apisPkg == "" {
+		// Validate that the apis directory exists under the working path.
+		apisPath := path.Join(c.RootPath, c.APIsPath)
+		if _, err := os.Stat(apisPath); err != nil {
+			return fmt.Errorf("error validating apis path %s: %v", apisPath, err)
+		}
+
+		c.apisPkg, err = crdutil.DirToGoPkg(apisPath)
+		if err != nil {
+			return err
+		}
+	}
+	return nil
+}
diff --git a/vendor/sigs.k8s.io/controller-tools/pkg/crd/util/util.go b/vendor/sigs.k8s.io/controller-tools/pkg/crd/util/util.go
new file mode 100644
index 0000000000000000000000000000000000000000..821aab5d257fad2fb54609f3e8d2af5363d7d6a4
--- /dev/null
+++ b/vendor/sigs.k8s.io/controller-tools/pkg/crd/util/util.go
@@ -0,0 +1,117 @@
+/*
+Copyright 2018 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package util
+
+import (
+	"bufio"
+	"fmt"
+	gobuild "go/build"
+	"log"
+	"os"
+	"path"
+	"path/filepath"
+	"strings"
+)
+
+// IsGoSrcPath reports whether the given path is exactly $GOPATH/src.
+func IsGoSrcPath(filePath string) bool {
+	for _, gopath := range getGoPaths() {
+		goSrc := path.Join(gopath, "src")
+		if filePath == goSrc {
+			return true
+		}
+	}
+
+	return false
+}
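
Roughly how the path helpers here behave, assuming GOPATH=/home/dev/go; the paths are hypothetical and `crdutil` is the alias this package is imported under in generator.go.

```go
crdutil.IsUnderGoSrcPath("/home/dev/go/src/github.com/org/repo") // true
crdutil.IsUnderGoSrcPath("/tmp/scratch")                         // false

pkg, _ := crdutil.DirToGoPkg("/home/dev/go/src/github.com/org/repo")
// pkg == "github.com/org/repo"
```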
+// IsUnderGoSrcPath reports whether the given path is under $GOPATH/src.
+func IsUnderGoSrcPath(filePath string) bool {
+	for _, gopath := range getGoPaths() {
+		goSrc := path.Join(gopath, "src")
+		if strings.HasPrefix(filepath.Dir(filePath), goSrc) {
+			return true
+		}
+	}
+
+	return false
+}
+
+// DirToGoPkg returns the Go pkg for the given directory if it exists
+// under a GOPATH, otherwise it returns an error. For example,
+// /Users/x/go/src/github.com/y/z ==> github.com/y/z
+func DirToGoPkg(dir string) (pkg string, err error) {
+	goPaths := getGoPaths()
+	for _, gopath := range goPaths {
+		goSrc := path.Join(gopath, "src")
+		if !strings.HasPrefix(dir, goSrc) {
+			continue
+		}
+		pkg, err := filepath.Rel(goSrc, dir)
+		if err == nil {
+			return pkg, err
+		}
+	}
+
+	return "", fmt.Errorf("dir '%s' does not exist under any GOPATH %v", dir, goPaths)
+}
+
+func getGoPaths() []string {
+	gopaths := os.Getenv("GOPATH")
+	if len(gopaths) == 0 {
+		gopaths = gobuild.Default.GOPATH
+	}
+	return filepath.SplitList(gopaths)
+}
+
+// PathHasProjectFile reports whether a PROJECT file exists under the given path.
+func PathHasProjectFile(filePath string) bool {
+	if _, err := os.Stat(path.Join(filePath, "PROJECT")); os.IsNotExist(err) {
+		return false
+	}
+
+	return true
+}
+
+// GetDomainFromProject reads the domain information from the PROJECT file under the path.
+func GetDomainFromProject(rootPath string) string {
+	var domain string
+
+	file, err := os.Open(path.Join(rootPath, "PROJECT"))
+	if err != nil {
+		log.Fatal(err)
+	}
+	defer func() {
+		if err := file.Close(); err != nil {
+			log.Fatal(err)
+		}
+	}()
+
+	scanner := bufio.NewScanner(file)
+	for scanner.Scan() {
+		if strings.HasPrefix(scanner.Text(), "domain:") {
+			domainInfo := strings.Split(scanner.Text(), ":")
+			if len(domainInfo) != 2 {
+				log.Fatalf("Unexpected domain info: %s", scanner.Text())
+			}
+			domain = strings.Replace(domainInfo[1], " ", "", -1)
+			break
+		}
+	}
+
+	return domain
+}
diff --git a/vendor/sigs.k8s.io/controller-tools/pkg/internal/codegen/parse/apis.go b/vendor/sigs.k8s.io/controller-tools/pkg/internal/codegen/parse/apis.go
new file mode 100644
index 0000000000000000000000000000000000000000..c953b4b3b612b9424934ded5c02574604fe9d613
--- /dev/null
+++ b/vendor/sigs.k8s.io/controller-tools/pkg/internal/codegen/parse/apis.go
@@ -0,0 +1,287 @@
+/*
+Copyright 2018 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/ + +package parse + +import ( + "fmt" + "path" + "path/filepath" + "strings" + + "k8s.io/apimachinery/pkg/util/sets" + "k8s.io/gengo/types" + "sigs.k8s.io/controller-tools/pkg/internal/codegen" +) + +type genUnversionedType struct { + Type *types.Type + Resource *codegen.APIResource +} + +func (b *APIs) parseAPIs() { + apis := &codegen.APIs{ + Domain: b.Domain, + Package: b.APIsPkg, + Groups: map[string]*codegen.APIGroup{}, + Rules: b.Rules, + Informers: b.Informers, + } + + for group, versionMap := range b.ByGroupVersionKind { + apiGroup := &codegen.APIGroup{ + Group: group, + GroupTitle: strings.Title(group), + Domain: b.Domain, + Versions: map[string]*codegen.APIVersion{}, + UnversionedResources: map[string]*codegen.APIResource{}, + } + + for version, kindMap := range versionMap { + apiVersion := &codegen.APIVersion{ + Domain: b.Domain, + Group: group, + Version: version, + Resources: map[string]*codegen.APIResource{}, + } + for kind, resource := range kindMap { + apiResource := &codegen.APIResource{ + Domain: resource.Domain, + Version: resource.Version, + Group: resource.Group, + Resource: resource.Resource, + Type: resource.Type, + REST: resource.REST, + Kind: resource.Kind, + Subresources: resource.Subresources, + StatusStrategy: resource.StatusStrategy, + Strategy: resource.Strategy, + NonNamespaced: resource.NonNamespaced, + ShortName: resource.ShortName, + } + parseDoc(resource, apiResource) + apiVersion.Resources[kind] = apiResource + // Set the package for the api version + apiVersion.Pkg = b.context.Universe[resource.Type.Name.Package] + // Set the package for the api group + apiGroup.Pkg = b.context.Universe[filepath.Dir(resource.Type.Name.Package)] + if apiGroup.Pkg != nil { + apiGroup.PkgPath = apiGroup.Pkg.Path + } + + apiGroup.UnversionedResources[kind] = apiResource + } + + apiGroup.Versions[version] = apiVersion + } + b.parseStructs(apiGroup) + apis.Groups[group] = apiGroup + } + apis.Pkg = b.context.Universe[b.APIsPkg] + b.APIs = apis +} + +func (b *APIs) parseStructs(apigroup *codegen.APIGroup) { + remaining := []genUnversionedType{} + for _, version := range apigroup.Versions { + for _, resource := range version.Resources { + remaining = append(remaining, genUnversionedType{resource.Type, resource}) + } + } + for _, version := range b.SubByGroupVersionKind[apigroup.Group] { + for _, kind := range version { + remaining = append(remaining, genUnversionedType{kind, nil}) + } + } + + done := sets.String{} + for len(remaining) > 0 { + // Pop the next element from the list + next := remaining[0] + remaining[0] = remaining[len(remaining)-1] + remaining = remaining[:len(remaining)-1] + + // Already processed this type. 
Skip it + if done.Has(next.Type.Name.Name) { + continue + } + done.Insert(next.Type.Name.Name) + + // Generate the struct and append to the list + result, additionalTypes := parseType(next.Type) + + // This is a resource, so generate the client + if b.genClient(next.Type) { + result.GenClient = true + result.GenDeepCopy = true + } + + if next.Resource != nil { + result.NonNamespaced = IsNonNamespaced(next.Type) + } + + if b.genDeepCopy(next.Type) { + result.GenDeepCopy = true + } + apigroup.Structs = append(apigroup.Structs, result) + + // Add the newly discovered subtypes + for _, at := range additionalTypes { + remaining = append(remaining, genUnversionedType{at, nil}) + } + } +} + +// parseType parses the type into a Struct, and returns a list of types that +// need to be parsed +func parseType(t *types.Type) (*codegen.Struct, []*types.Type) { + remaining := []*types.Type{} + + s := &codegen.Struct{ + Name: t.Name.Name, + GenClient: false, + GenUnversioned: true, // Generate unversioned structs by default + } + + for _, c := range t.CommentLines { + if strings.Contains(c, "+genregister:unversioned=false") { + // Don't generate the unversioned struct + s.GenUnversioned = false + } + } + + for _, member := range t.Members { + uType := member.Type.Name.Name + memberName := member.Name + uImport := "" + + // Use the element type for Pointers, Maps and Slices + mSubType := member.Type + hasElem := false + for mSubType.Elem != nil { + mSubType = mSubType.Elem + hasElem = true + } + if hasElem { + // Strip the package from the field type + uType = strings.Replace(member.Type.String(), mSubType.Name.Package+".", "", 1) + } + + base := filepath.Base(member.Type.String()) + samepkg := t.Name.Package == mSubType.Name.Package + + // If not in the same package, calculate the import pkg + if !samepkg { + parts := strings.Split(base, ".") + if len(parts) > 1 { + // Don't generate unversioned types for core types, just use the versioned types + if strings.HasPrefix(mSubType.Name.Package, "k8s.io/api/") { + // Import the package under an alias so it doesn't conflict with other groups + // having the same version + importAlias := path.Base(path.Dir(mSubType.Name.Package)) + path.Base(mSubType.Name.Package) + uImport = fmt.Sprintf("%s \"%s\"", importAlias, mSubType.Name.Package) + if hasElem { + // Replace the full package with the alias when referring to the type + uType = strings.Replace(member.Type.String(), mSubType.Name.Package, importAlias, 1) + } else { + // Replace the full package with the alias when referring to the type + uType = fmt.Sprintf("%s.%s", importAlias, parts[1]) + } + } else { + switch member.Type.Name.Package { + case "k8s.io/apimachinery/pkg/apis/meta/v1": + // Use versioned types for meta/v1 + uImport = fmt.Sprintf("%s \"%s\"", "metav1", "k8s.io/apimachinery/pkg/apis/meta/v1") + uType = "metav1." + parts[1] + default: + // Use unversioned types for everything else + t := member.Type + + if t.Elem != nil { + // handle Pointers, Maps, Slices + + // We need to parse the package from the Type String + t = t.Elem + str := member.Type.String() + startPkg := strings.LastIndexAny(str, "*]") + endPkg := strings.LastIndexAny(str, ".") + pkg := str[startPkg+1 : endPkg] + name := str[endPkg+1:] + prefix := str[:startPkg+1] + + uImportBase := path.Base(pkg) + uImportName := path.Base(path.Dir(pkg)) + uImportBase + uImport = fmt.Sprintf("%s \"%s\"", uImportName, pkg) + + uType = prefix + uImportName + "." 
+ name + } else { + // handle non- Pointer, Maps, Slices + pkg := t.Name.Package + name := t.Name.Name + + // Come up with the alias the package is imported under + // Concatenate with directory package to reduce naming collisions + uImportBase := path.Base(pkg) + uImportName := path.Base(path.Dir(pkg)) + uImportBase + + // Create the import statement + uImport = fmt.Sprintf("%s \"%s\"", uImportName, pkg) + + // Create the field type name - should be <pkgalias>.<TypeName> + uType = uImportName + "." + name + } + } + } + } + } + + if member.Embedded { + memberName = "" + } + + s.Fields = append(s.Fields, &codegen.Field{ + Name: memberName, + VersionedPackage: member.Type.Name.Package, + UnversionedImport: uImport, + UnversionedType: uType, + }) + + // Add this member Type for processing if it isn't a primitive and + // is part of the same API group + if !mSubType.IsPrimitive() && GetGroup(mSubType) == GetGroup(t) { + remaining = append(remaining, mSubType) + } + } + return s, remaining +} + +func (b *APIs) genClient(c *types.Type) bool { + comments := Comments(c.CommentLines) + resource := comments.getTag("resource", ":") + comments.getTag("kubebuilder:resource", ":") + return len(resource) > 0 +} + +func (b *APIs) genDeepCopy(c *types.Type) bool { + comments := Comments(c.CommentLines) + return comments.hasTag("subresource-request") +} + +func parseDoc(resource, apiResource *codegen.APIResource) { + if HasDocAnnotation(resource.Type) { + resource.DocAnnotation = getDocAnnotation(resource.Type, "warning", "note") + apiResource.DocAnnotation = resource.DocAnnotation + } +} diff --git a/vendor/sigs.k8s.io/controller-tools/pkg/internal/codegen/parse/context.go b/vendor/sigs.k8s.io/controller-tools/pkg/internal/codegen/parse/context.go new file mode 100644 index 0000000000000000000000000000000000000000..98493540f90ffade21c207a9c808e86a51a42484 --- /dev/null +++ b/vendor/sigs.k8s.io/controller-tools/pkg/internal/codegen/parse/context.go @@ -0,0 +1,42 @@ +/* +Copyright 2018 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package parse + +import ( + "k8s.io/gengo/generator" + "k8s.io/gengo/namer" + "k8s.io/gengo/parser" +) + +// NewContext returns a new Context from the builder +func NewContext(p *parser.Builder) (*generator.Context, error) { + return generator.NewContext(p, NameSystems(), DefaultNameSystem()) +} + +// DefaultNameSystem returns public by default. +func DefaultNameSystem() string { + return "public" +} + +// NameSystems returns the name system used by the generators in this package. +// e.g. 
black-magic +func NameSystems() namer.NameSystems { + return namer.NameSystems{ + "public": namer.NewPublicNamer(1), + "raw": namer.NewRawNamer("", nil), + } +} diff --git a/vendor/sigs.k8s.io/controller-tools/pkg/internal/codegen/parse/crd.go b/vendor/sigs.k8s.io/controller-tools/pkg/internal/codegen/parse/crd.go new file mode 100644 index 0000000000000000000000000000000000000000..a03f6bb82ae37a3e4e1ce2a612c2c7f0613fc9a3 --- /dev/null +++ b/vendor/sigs.k8s.io/controller-tools/pkg/internal/codegen/parse/crd.go @@ -0,0 +1,639 @@ +/* +Copyright 2018 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package parse + +import ( + "bytes" + "encoding/json" + "fmt" + "log" + "regexp" + "strconv" + "strings" + "text/template" + + "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/util/sets" + "k8s.io/gengo/types" +) + +// parseCRDs populates the CRD field of each Group.Version.Resource, +// creating validations using the annotations on type fields. +func (b *APIs) parseCRDs() { + for _, group := range b.APIs.Groups { + for _, version := range group.Versions { + for _, resource := range version.Resources { + if IsAPIResource(resource.Type) { + resource.JSONSchemaProps, resource.Validation = + b.typeToJSONSchemaProps(resource.Type, sets.NewString(), []string{}, true) + + // Note: Drop the Type field at the root level of validation + // schema. Refer to following issue for details. 
+ // https://github.com/kubernetes/kubernetes/issues/65293 + resource.JSONSchemaProps.Type = "" + j, err := json.MarshalIndent(resource.JSONSchemaProps, "", " ") + if err != nil { + log.Fatalf("Could not marshal validation %v\n", err) + } + resource.ValidationComments = string(j) + + resource.CRD = v1beta1.CustomResourceDefinition{ + TypeMeta: metav1.TypeMeta{ + APIVersion: "apiextensions.k8s.io/v1beta1", + Kind: "CustomResourceDefinition", + }, + ObjectMeta: metav1.ObjectMeta{ + Name: fmt.Sprintf("%s.%s.%s", resource.Resource, resource.Group, resource.Domain), + Labels: map[string]string{"controller-tools.k8s.io": "1.0"}, + }, + Spec: v1beta1.CustomResourceDefinitionSpec{ + Group: fmt.Sprintf("%s.%s", resource.Group, resource.Domain), + Version: resource.Version, + Names: v1beta1.CustomResourceDefinitionNames{ + Kind: resource.Kind, + Plural: resource.Resource, + }, + Validation: &v1beta1.CustomResourceValidation{ + OpenAPIV3Schema: &resource.JSONSchemaProps, + }, + }, + } + if resource.NonNamespaced { + resource.CRD.Spec.Scope = "Cluster" + } else { + resource.CRD.Spec.Scope = "Namespaced" + } + + if hasCategories(resource.Type) { + categoriesTag := getCategoriesTag(resource.Type) + categories := strings.Split(categoriesTag, ",") + resource.CRD.Spec.Names.Categories = categories + resource.Categories = categories + } + + if hasSingular(resource.Type) { + singularName := getSingularName(resource.Type) + resource.CRD.Spec.Names.Singular = singularName + } + + if hasStatusSubresource(resource.Type) { + if resource.CRD.Spec.Subresources == nil { + resource.CRD.Spec.Subresources = &v1beta1.CustomResourceSubresources{} + } + resource.CRD.Spec.Subresources.Status = &v1beta1.CustomResourceSubresourceStatus{} + } + + resource.CRD.Status.Conditions = []v1beta1.CustomResourceDefinitionCondition{} + resource.CRD.Status.StoredVersions = []string{} + + if hasScaleSubresource(resource.Type) { + if resource.CRD.Spec.Subresources == nil { + resource.CRD.Spec.Subresources = &v1beta1.CustomResourceSubresources{} + } + jsonPath, err := parseScaleParams(resource.Type) + if err != nil { + log.Fatalf("failed in parsing CRD, error: %v", err.Error()) + } + resource.CRD.Spec.Subresources.Scale = &v1beta1.CustomResourceSubresourceScale{ + SpecReplicasPath: jsonPath[specReplicasPath], + StatusReplicasPath: jsonPath[statusReplicasPath], + } + labelSelector, ok := jsonPath[labelSelectorPath] + if ok && labelSelector != "" { + resource.CRD.Spec.Subresources.Scale.LabelSelectorPath = &labelSelector + } + } + if hasPrintColumn(resource.Type) { + result, err := parsePrintColumnParams(resource.Type) + if err != nil { + log.Fatalf("failed to parse printcolumn annotations, error: %v", err.Error()) + } + resource.CRD.Spec.AdditionalPrinterColumns = result + } + if len(resource.ShortName) > 0 { + resource.CRD.Spec.Names.ShortNames = strings.Split(resource.ShortName, ";") + } + } + } + } +} + +func (b *APIs) getTime() string { + return `v1beta1.JSONSchemaProps{ + Type: "string", + Format: "date-time", +}` +} + +func (b *APIs) getDuration() string { + return `v1beta1.JSONSchemaProps{ + Type: "string", +}` +} + +func (b *APIs) getQuantity() string { + return `v1beta1.JSONSchemaProps{ + Type: "string", +}` +} + +func (b *APIs) objSchema() string { + return `v1beta1.JSONSchemaProps{ + Type: "object", +}` +} +
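For orientation, a minimal standalone sketch (not part of the vendored file) of the CustomResourceDefinition shape that parseCRDs assembles above; the group, version, and kind values are illustrative placeholders, and `sigs.k8s.io/yaml` is assumed to be available (it appears in Gopkg.lock):

```go
package main

import (
	"fmt"

	"k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"sigs.k8s.io/yaml"
)

func main() {
	// Hypothetical resource; mirrors the fields parseCRDs fills in above.
	crd := v1beta1.CustomResourceDefinition{
		TypeMeta: metav1.TypeMeta{
			APIVersion: "apiextensions.k8s.io/v1beta1",
			Kind:       "CustomResourceDefinition",
		},
		ObjectMeta: metav1.ObjectMeta{
			Name:   "machinehealthchecks.healthchecking.openshift.io",
			Labels: map[string]string{"controller-tools.k8s.io": "1.0"},
		},
		Spec: v1beta1.CustomResourceDefinitionSpec{
			Group:   "healthchecking.openshift.io",
			Version: "v1alpha1",
			Scope:   "Namespaced",
			Names: v1beta1.CustomResourceDefinitionNames{
				Kind:   "MachineHealthCheck",
				Plural: "machinehealthchecks",
			},
		},
	}
	// Marshal to the YAML manifest that gen-crd.sh ultimately emits.
	out, _ := yaml.Marshal(crd)
	fmt.Println(string(out))
}
```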
+// typeToJSONSchemaProps returns a JSONSchemaProps object and its serialization +// in Go that describe the JSONSchema validations for the given type. +func (b *APIs) typeToJSONSchemaProps(t *types.Type, found sets.String, comments []string, isRoot bool) (v1beta1.JSONSchemaProps, string) { + // Special cases + time := types.Name{Name: "Time", Package: "k8s.io/apimachinery/pkg/apis/meta/v1"} + duration := types.Name{Name: "Duration", Package: "k8s.io/apimachinery/pkg/apis/meta/v1"} + quantity := types.Name{Name: "Quantity", Package: "k8s.io/apimachinery/pkg/api/resource"} + meta := types.Name{Name: "ObjectMeta", Package: "k8s.io/apimachinery/pkg/apis/meta/v1"} + unstructured := types.Name{Name: "Unstructured", Package: "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"} + rawExtension := types.Name{Name: "RawExtension", Package: "k8s.io/apimachinery/pkg/runtime"} + intOrString := types.Name{Name: "IntOrString", Package: "k8s.io/apimachinery/pkg/util/intstr"} + // special types first + specialTypeProps := v1beta1.JSONSchemaProps{ + Description: parseDescription(comments), + } + for _, l := range comments { + getValidation(l, &specialTypeProps) + } + switch t.Name { + case time: + specialTypeProps.Type = "string" + specialTypeProps.Format = "date-time" + return specialTypeProps, b.getTime() + case duration: + specialTypeProps.Type = "string" + return specialTypeProps, b.getDuration() + case quantity: + specialTypeProps.Type = "string" + return specialTypeProps, b.getQuantity() + case meta, unstructured, rawExtension: + specialTypeProps.Type = "object" + return specialTypeProps, b.objSchema() + case intOrString: + specialTypeProps.AnyOf = []v1beta1.JSONSchemaProps{ + { + Type: "string", + }, + { + Type: "integer", + }, + } + return specialTypeProps, b.objSchema() + } + + var v v1beta1.JSONSchemaProps + var s string + switch t.Kind { + case types.Builtin: + v, s = b.parsePrimitiveValidation(t, found, comments) + case types.Struct: + v, s = b.parseObjectValidation(t, found, comments, isRoot) + case types.Map: + v, s = b.parseMapValidation(t, found, comments) + case types.Slice: + v, s = b.parseArrayValidation(t, found, comments) + case types.Array: + v, s = b.parseArrayValidation(t, found, comments) + case types.Pointer: + v, s = b.typeToJSONSchemaProps(t.Elem, found, comments, false) + case types.Alias: + v, s = b.typeToJSONSchemaProps(t.Underlying, found, comments, false) + default: + log.Fatalf("Unsupported Kind %v\n", t.Kind) + } + + return v, s +} + +var jsonRegex = regexp.MustCompile("json:\"([a-zA-Z0-9,]+)\"") + +type primitiveTemplateArgs struct { + v1beta1.JSONSchemaProps + Value string + Format string + EnumValue string // TODO check type of enum value to match the type of field + Description string +} + +var primitiveTemplate = template.Must(template.New("primitive-template").Parse( + `v1beta1.JSONSchemaProps{ + {{ if .Pattern -}} + Pattern: "{{ .Pattern }}", + {{ end -}} + {{ if .Maximum -}} + Maximum: getFloat({{ .Maximum }}), + {{ end -}} + {{ if .ExclusiveMaximum -}} + ExclusiveMaximum: {{ .ExclusiveMaximum }}, + {{ end -}} + {{ if .Minimum -}} + Minimum: getFloat({{ .Minimum }}), + {{ end -}} + {{ if .ExclusiveMinimum -}} + ExclusiveMinimum: {{ .ExclusiveMinimum }}, + {{ end -}} + Type: "{{ .Value }}", + {{ if .Format -}} + Format: "{{ .Format }}", + {{ end -}} + {{ if .EnumValue -}} + Enum: {{ .EnumValue }}, + {{ end -}} + {{ if .MaxLength -}} + MaxLength: getInt({{ .MaxLength }}), + {{ end -}} + {{ if .MinLength -}} + MinLength: getInt({{ .MinLength }}), + {{ end -}} +}`)) + +// parsePrimitiveValidation returns a JSONSchemaProps object and its +// serialization in Go that describe the validations for the given primitive +// 
type. +func (b *APIs) parsePrimitiveValidation(t *types.Type, found sets.String, comments []string) (v1beta1.JSONSchemaProps, string) { + props := v1beta1.JSONSchemaProps{Type: string(t.Name.Name)} + + for _, l := range comments { + getValidation(l, &props) + } + + buff := &bytes.Buffer{} + + var n, f, s, d string + switch t.Name.Name { + case "int", "int64", "uint64": + n = "integer" + f = "int64" + case "int32", "uint32": + n = "integer" + f = "int32" + case "float", "float32": + n = "number" + f = "float" + case "float64": + n = "number" + f = "double" + case "bool": + n = "boolean" + case "string": + n = "string" + f = props.Format + default: + n = t.Name.Name + } + if props.Enum != nil { + s = parseEnumToString(props.Enum) + } + d = parseDescription(comments) + if err := primitiveTemplate.Execute(buff, primitiveTemplateArgs{props, n, f, s, d}); err != nil { + log.Fatalf("%v", err) + } + props.Type = n + props.Format = f + props.Description = d + return props, buff.String() +} + +type mapTempateArgs struct { + Result string + SkipMapValidation bool +} + +var mapTemplate = template.Must(template.New("map-template").Parse( + `v1beta1.JSONSchemaProps{ + Type: "object", + {{if not .SkipMapValidation}}AdditionalProperties: &v1beta1.JSONSchemaPropsOrBool{ + Allows: true, + Schema: &{{.Result}}, + },{{end}} +}`)) + +// parseMapValidation returns a JSONSchemaProps object and its serialization in +// Go that describe the validations for the given map type. +func (b *APIs) parseMapValidation(t *types.Type, found sets.String, comments []string) (v1beta1.JSONSchemaProps, string) { + additionalProps, result := b.typeToJSONSchemaProps(t.Elem, found, comments, false) + additionalProps.Description = "" + props := v1beta1.JSONSchemaProps{ + Type: "object", + Description: parseDescription(comments), + } + parseOption := b.arguments.CustomArgs.(*Options) + if !parseOption.SkipMapValidation { + props.AdditionalProperties = &v1beta1.JSONSchemaPropsOrBool{ + Allows: true, + Schema: &additionalProps} + } + + for _, l := range comments { + getValidation(l, &props) + } + + buff := &bytes.Buffer{} + if err := mapTemplate.Execute(buff, mapTempateArgs{Result: result, SkipMapValidation: parseOption.SkipMapValidation}); err != nil { + log.Fatalf("%v", err) + } + return props, buff.String() +} + +var arrayTemplate = template.Must(template.New("array-template").Parse( + `v1beta1.JSONSchemaProps{ + Type: "{{.Type}}", + {{ if .Format -}} + Format: "{{.Format}}", + {{ end -}} + {{ if .MaxItems -}} + MaxItems: getInt({{ .MaxItems }}), + {{ end -}} + {{ if .MinItems -}} + MinItems: getInt({{ .MinItems }}), + {{ end -}} + {{ if .UniqueItems -}} + UniqueItems: {{ .UniqueItems }}, + {{ end -}} + {{ if .Items -}} + Items: &v1beta1.JSONSchemaPropsOrArray{ + Schema: &{{.ItemsSchema}}, + }, + {{ end -}} +}`)) + +type arrayTemplateArgs struct { + v1beta1.JSONSchemaProps + ItemsSchema string +} + +// parseArrayValidation returns a JSONSchemaProps object and its serialization in +// Go that describe the validations for the given array type. 
+func (b *APIs) parseArrayValidation(t *types.Type, found sets.String, comments []string) (v1beta1.JSONSchemaProps, string) { + items, result := b.typeToJSONSchemaProps(t.Elem, found, comments, false) + items.Description = "" + props := v1beta1.JSONSchemaProps{ + Type: "array", + Items: &v1beta1.JSONSchemaPropsOrArray{Schema: &items}, + Description: parseDescription(comments), + } + // To represent byte arrays in the generated code, the property of the OpenAPI definition + // should have string as its type and byte as its format. + if t.Name.Name == "[]byte" { + props.Type = "string" + props.Format = "byte" + props.Items = nil + props.Description = parseDescription(comments) + } + for _, l := range comments { + getValidation(l, &props) + } + if t.Name.Name != "[]byte" { + // Except for the byte array special case above, the "format" property + // should be applied to the array items and not the array itself. + props.Format = "" + } + buff := &bytes.Buffer{} + if err := arrayTemplate.Execute(buff, arrayTemplateArgs{props, result}); err != nil { + log.Fatalf("%v", err) + } + return props, buff.String() +} + +type objectTemplateArgs struct { + v1beta1.JSONSchemaProps + Fields map[string]string + Required []string + IsRoot bool +} + +var objectTemplate = template.Must(template.New("object-template").Parse( + `v1beta1.JSONSchemaProps{ + {{ if not .IsRoot -}} + Type: "object", + {{ end -}} + Properties: map[string]v1beta1.JSONSchemaProps{ + {{ range $k, $v := .Fields -}} + "{{ $k }}": {{ $v }}, + {{ end -}} + }, + {{if .Required}}Required: []string{ + {{ range $k, $v := .Required -}} + "{{ $v }}", + {{ end -}} + },{{ end -}} +}`)) + +// parseObjectValidation returns a JSONSchemaProps object and its serialization in +// Go that describe the validations for the given object type. +func (b *APIs) parseObjectValidation(t *types.Type, found sets.String, comments []string, isRoot bool) (v1beta1.JSONSchemaProps, string) { + buff := &bytes.Buffer{} + props := v1beta1.JSONSchemaProps{ + Type: "object", + Description: parseDescription(comments), + } + + for _, l := range comments { + getValidation(l, &props) + } + + if strings.HasPrefix(t.Name.String(), "k8s.io/api") { + if err := objectTemplate.Execute(buff, objectTemplateArgs{props, nil, nil, false}); err != nil { + log.Fatalf("%v", err) + } + } else { + m, result, required := b.getMembers(t, found) + props.Properties = m + props.Required = required + + if err := objectTemplate.Execute(buff, objectTemplateArgs{props, result, required, isRoot}); err != nil { + log.Fatalf("%v", err) + } + } + return props, buff.String() +} + +// getValidation parses the validation tags from the comment and sets the +// validation rules on the given JSONSchemaProps. 
+func getValidation(comment string, props *v1beta1.JSONSchemaProps) { + comment = strings.TrimLeft(comment, " ") + if !strings.HasPrefix(comment, "+kubebuilder:validation:") { + return + } + c := strings.Replace(comment, "+kubebuilder:validation:", "", -1) + parts := strings.Split(c, "=") + if len(parts) != 2 { + log.Fatalf("Expected +kubebuilder:validation:<key>=<value> actual: %s", comment) + return + } + switch parts[0] { + case "Maximum": + f, err := strconv.ParseFloat(parts[1], 64) + if err != nil { + log.Fatalf("Could not parse float from %s: %v", comment, err) + return + } + props.Maximum = &f + case "ExclusiveMaximum": + b, err := strconv.ParseBool(parts[1]) + if err != nil { + log.Fatalf("Could not parse bool from %s: %v", comment, err) + return + } + props.ExclusiveMaximum = b + case "Minimum": + f, err := strconv.ParseFloat(parts[1], 64) + if err != nil { + log.Fatalf("Could not parse float from %s: %v", comment, err) + return + } + props.Minimum = &f + case "ExclusiveMinimum": + b, err := strconv.ParseBool(parts[1]) + if err != nil { + log.Fatalf("Could not parse bool from %s: %v", comment, err) + return + } + props.ExclusiveMinimum = b + case "MaxLength": + i, err := strconv.Atoi(parts[1]) + v := int64(i) + if err != nil { + log.Fatalf("Could not parse int from %s: %v", comment, err) + return + } + props.MaxLength = &v + case "MinLength": + i, err := strconv.Atoi(parts[1]) + v := int64(i) + if err != nil { + log.Fatalf("Could not parse int from %s: %v", comment, err) + return + } + props.MinLength = &v + case "Pattern": + props.Pattern = parts[1] + case "MaxItems": + if props.Type == "array" { + i, err := strconv.Atoi(parts[1]) + v := int64(i) + if err != nil { + log.Fatalf("Could not parse int from %s: %v", comment, err) + return + } + props.MaxItems = &v + } + case "MinItems": + if props.Type == "array" { + i, err := strconv.Atoi(parts[1]) + v := int64(i) + if err != nil { + log.Fatalf("Could not parse int from %s: %v", comment, err) + return + } + props.MinItems = &v + } + case "UniqueItems": + if props.Type == "array" { + b, err := strconv.ParseBool(parts[1]) + if err != nil { + log.Fatalf("Could not parse bool from %s: %v", comment, err) + return + } + props.UniqueItems = b + } + case "MultipleOf": + f, err := strconv.ParseFloat(parts[1], 64) + if err != nil { + log.Fatalf("Could not parse float from %s: %v", comment, err) + return + } + props.MultipleOf = &f + case "Enum": + if props.Type != "array" { + value := strings.Split(parts[1], ",") + enums := []v1beta1.JSON{} + for _, s := range value { + checkType(props, s, &enums) + } + props.Enum = enums + } + case "Format": + props.Format = parts[1] + default: + log.Fatalf("Unsupported validation: %s", comment) + } +} + +// getMembers builds maps by field name of the JSONSchemaProps and their Go +// serializations. 
+func (b *APIs) getMembers(t *types.Type, found sets.String) (map[string]v1beta1.JSONSchemaProps, map[string]string, []string) { + members := map[string]v1beta1.JSONSchemaProps{} + result := map[string]string{} + required := []string{} + + // Don't allow recursion until we support it through refs + // TODO: Support recursion + if found.Has(t.Name.String()) { + fmt.Printf("Breaking recursion for type %s", t.Name.String()) + return members, result, required + } + found.Insert(t.Name.String()) + + for _, member := range t.Members { + tags := jsonRegex.FindStringSubmatch(member.Tags) + if len(tags) == 0 { + // Skip fields without json tags + //fmt.Printf("Skipping member %s %s\n", member.Name, member.Type.Name.String()) + continue + } + ts := strings.Split(tags[1], ",") + name := member.Name + strat := "" + if len(ts) > 0 && len(ts[0]) > 0 { + name = ts[0] + } + if len(ts) > 1 { + strat = ts[1] + } + + // Inline "inline" structs + if strat == "inline" { + m, r, re := b.getMembers(member.Type, found) + for n, v := range m { + members[n] = v + } + for n, v := range r { + result[n] = v + } + required = append(required, re...) + } else { + m, r := b.typeToJSONSchemaProps(member.Type, found, member.CommentLines, false) + members[name] = m + result[name] = r + if !strings.HasSuffix(strat, "omitempty") { + required = append(required, name) + } + } + } + + defer found.Delete(t.Name.String()) + return members, result, required +} diff --git a/vendor/sigs.k8s.io/controller-tools/pkg/internal/codegen/parse/index.go b/vendor/sigs.k8s.io/controller-tools/pkg/internal/codegen/parse/index.go new file mode 100644 index 0000000000000000000000000000000000000000..a08cf751b56b14ce2f2fa715db07dcf4c071b8bc --- /dev/null +++ b/vendor/sigs.k8s.io/controller-tools/pkg/internal/codegen/parse/index.go @@ -0,0 +1,161 @@ +/* +Copyright 2018 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package parse + +import ( + "fmt" + "log" + "strings" + + "github.com/markbates/inflect" + "k8s.io/gengo/types" + "sigs.k8s.io/controller-tools/pkg/internal/codegen" + "sigs.k8s.io/controller-tools/pkg/internal/general" +) + +// parseIndex indexes all types with the comment "// +resource=RESOURCE" by GroupVersionKind and +// GroupKindVersion. +func (b *APIs) parseIndex() { + // Index resource by group, version, kind + b.ByGroupVersionKind = map[string]map[string]map[string]*codegen.APIResource{} + + // Index resources by group, kind, version + b.ByGroupKindVersion = map[string]map[string]map[string]*codegen.APIResource{} + + // Index subresources by group, version, kind + b.SubByGroupVersionKind = map[string]map[string]map[string]*types.Type{} + + for _, c := range b.context.Order { + // The type is a subresource, add it to the subresource index + if IsAPISubresource(c) { + group := GetGroup(c) + version := GetVersion(c, group) + kind := GetKind(c, group) + if _, f := b.SubByGroupVersionKind[group]; !f { + b.SubByGroupVersionKind[group] = map[string]map[string]*types.Type{} + } + if _, f := b.SubByGroupVersionKind[group][version]; !f { + b.SubByGroupVersionKind[group][version] = map[string]*types.Type{} + } + b.SubByGroupVersionKind[group][version][kind] = c + } + + // If it isn't a subresource or resource, continue to the next type + if !IsAPIResource(c) { + continue + } + + // Parse out the resource information + r := &codegen.APIResource{ + Type: c, + NonNamespaced: IsNonNamespaced(c), + } + r.Group = GetGroup(c) + r.Version = GetVersion(c, r.Group) + r.Kind = GetKind(c, r.Group) + r.Domain = b.Domain + + // TODO: revisit the part... + if r.Resource == "" { + rs := inflect.NewDefaultRuleset() + r.Resource = rs.Pluralize(strings.ToLower(r.Kind)) + } + rt, err := parseResourceAnnotation(c) + if err != nil { + log.Fatalf("failed to parse resource annotations, error: %v", err.Error()) + } + if rt.Resource != "" { + r.Resource = rt.Resource + } + r.ShortName = rt.ShortName + + // Copy the Status strategy to mirror the non-status strategy + r.StatusStrategy = strings.TrimSuffix(r.Strategy, "Strategy") + r.StatusStrategy = fmt.Sprintf("%sStatusStrategy", r.StatusStrategy) + + // Initialize the map entries so they aren't nil + if _, f := b.ByGroupKindVersion[r.Group]; !f { + b.ByGroupKindVersion[r.Group] = map[string]map[string]*codegen.APIResource{} + } + if _, f := b.ByGroupKindVersion[r.Group][r.Kind]; !f { + b.ByGroupKindVersion[r.Group][r.Kind] = map[string]*codegen.APIResource{} + } + if _, f := b.ByGroupVersionKind[r.Group]; !f { + b.ByGroupVersionKind[r.Group] = map[string]map[string]*codegen.APIResource{} + } + if _, f := b.ByGroupVersionKind[r.Group][r.Version]; !f { + b.ByGroupVersionKind[r.Group][r.Version] = map[string]*codegen.APIResource{} + } + + // Add the resource to the map + b.ByGroupKindVersion[r.Group][r.Kind][r.Version] = r + b.ByGroupVersionKind[r.Group][r.Version][r.Kind] = r + r.Type = c + } +}
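The pluralization fallback above can be exercised in isolation. A small standalone sketch (kinds are illustrative) using the same inflect ruleset the parser uses:

```go
package main

import (
	"fmt"
	"strings"

	"github.com/markbates/inflect"
)

func main() {
	rs := inflect.NewDefaultRuleset()
	for _, kind := range []string{"MachineHealthCheck", "Machine", "MachineSet"} {
		// Same derivation as parseIndex: lowercase the kind, then pluralize.
		fmt.Println(rs.Pluralize(strings.ToLower(kind)))
	}
	// Expected output: machinehealthchecks, machines, machinesets
}
```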
Expected "+ + "keys [path=<resourcepath>] "+ + "Got string: [%s]", tag) + } + switch key { + case "path": + res.Resource = value + case "shortName": + res.ShortName = value + default: + return resourceTags{}, fmt.Errorf("The given input %s is invalid", value) + } + } + return res, nil +} + +// parseResourceAnnotation parses the tags in a "+resource=" comment into a resourceTags struct. +func parseResourceAnnotation(t *types.Type) (resourceTags, error) { + finalResult := resourceTags{} + var resourceAnnotationFound bool + for _, comment := range t.CommentLines { + anno := general.GetAnnotation(comment, "kubebuilder:resource") + if len(anno) == 0 { + continue + } + result, err := resourceAnnotationValue(anno) + if err != nil { + return resourceTags{}, err + } + if resourceAnnotationFound { + return resourceTags{}, fmt.Errorf("resource annotation should only exists once per type") + } + resourceAnnotationFound = true + finalResult = result + } + return finalResult, nil +} diff --git a/vendor/sigs.k8s.io/controller-tools/pkg/internal/codegen/parse/parser.go b/vendor/sigs.k8s.io/controller-tools/pkg/internal/codegen/parse/parser.go new file mode 100644 index 0000000000000000000000000000000000000000..c7a55dd3deccaee015539bd4c16168146f7150c6 --- /dev/null +++ b/vendor/sigs.k8s.io/controller-tools/pkg/internal/codegen/parse/parser.go @@ -0,0 +1,151 @@ +/* +Copyright 2018 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package parse + +import ( + "bufio" + "go/build" + "log" + "os" + "path/filepath" + "strings" + + "github.com/pkg/errors" + rbacv1 "k8s.io/api/rbac/v1" + "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/util/sets" + "k8s.io/gengo/args" + "k8s.io/gengo/generator" + "k8s.io/gengo/types" + "sigs.k8s.io/controller-tools/pkg/internal/codegen" +) + +// APIs is the information of a collection of API +type APIs struct { + context *generator.Context + arguments *args.GeneratorArgs + Domain string + VersionedPkgs sets.String + UnversionedPkgs sets.String + APIsPkg string + APIsPkgRaw *types.Package + GroupNames sets.String + + APIs *codegen.APIs + Controllers []codegen.Controller + + ByGroupKindVersion map[string]map[string]map[string]*codegen.APIResource + ByGroupVersionKind map[string]map[string]map[string]*codegen.APIResource + SubByGroupVersionKind map[string]map[string]map[string]*types.Type + Groups map[string]types.Package + Rules []rbacv1.PolicyRule + Informers map[v1.GroupVersionKind]bool +} + +// NewAPIs returns a new APIs instance with given context. 
+func NewAPIs(context *generator.Context, arguments *args.GeneratorArgs, domain, apisPkg string) *APIs { + b := &APIs{ + context: context, + arguments: arguments, + Domain: domain, + APIsPkg: apisPkg, + } + b.parsePackages() + b.parseGroupNames() + b.parseIndex() + b.parseAPIs() + b.parseCRDs() + if len(b.Domain) == 0 { + b.parseDomain() + } + return b +} + +// parseGroupNames initializes b.GroupNames with the set of all groups +func (b *APIs) parseGroupNames() { + b.GroupNames = sets.String{} + for p := range b.UnversionedPkgs { + pkg := b.context.Universe[p] + if pkg == nil { + // If the input had no Go files, for example. + continue + } + b.GroupNames.Insert(filepath.Base(p)) + } +} + +// parsePackages parses out the sets of Versioned, Unversioned packages and identifies the root Apis package. +func (b *APIs) parsePackages() { + b.VersionedPkgs = sets.NewString() + b.UnversionedPkgs = sets.NewString() + for _, o := range b.context.Order { + if IsAPIResource(o) { + versioned := o.Name.Package + b.VersionedPkgs.Insert(versioned) + + unversioned := filepath.Dir(versioned) + b.UnversionedPkgs.Insert(unversioned) + } + } +} + +// parseDomain parses the domain from the apis/doc.go file comment "// +domain=YOUR_DOMAIN". +func (b *APIs) parseDomain() { + pkg := b.context.Universe[b.APIsPkg] + if pkg == nil { + // If the input had no Go files, for example. + panic(errors.Errorf("Missing apis package.")) + } + comments := Comments(pkg.Comments) + b.Domain = comments.getTag("domain", "=") + if len(b.Domain) == 0 { + b.Domain = parseDomainFromFiles(b.context.Inputs) + if len(b.Domain) == 0 { + panic("Could not find string matching // +domain=.+ in apis/doc.go") + } + } +} + +func parseDomainFromFiles(paths []string) string { + var domain string + for _, path := range paths { + if strings.HasSuffix(path, "pkg/apis") { + filePath := strings.Join([]string{build.Default.GOPATH, "src", path, "doc.go"}, "/") + lines := []string{} + + file, err := os.Open(filePath) + if err != nil { + log.Fatal(err) + } + defer file.Close() + scanner := bufio.NewScanner(file) + for scanner.Scan() { + if strings.HasPrefix(scanner.Text(), "//") { + lines = append(lines, strings.Replace(scanner.Text(), "// ", "", 1)) + } + } + if err := scanner.Err(); err != nil { + log.Fatal(err) + } + + comments := Comments(lines) + domain = comments.getTag("domain", "=") + break + } + } + return domain +} diff --git a/vendor/sigs.k8s.io/controller-tools/pkg/internal/codegen/parse/util.go b/vendor/sigs.k8s.io/controller-tools/pkg/internal/codegen/parse/util.go new file mode 100644 index 0000000000000000000000000000000000000000..7df44535344233d2d3de5dd38ba6777866b153a8 --- /dev/null +++ b/vendor/sigs.k8s.io/controller-tools/pkg/internal/codegen/parse/util.go @@ -0,0 +1,539 @@ +/* +Copyright 2018 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package parse + +import ( + "fmt" + "log" + "path/filepath" + "strconv" + "strings" + + "github.com/pkg/errors" + "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1" + "k8s.io/gengo/types" +) + +const ( + specReplicasPath = "specpath" + statusReplicasPath = "statuspath" + labelSelectorPath = "selectorpath" + jsonPathError = "invalid scale path. specpath, statuspath key-value pairs are required; only the selectorpath key-value pair is optional. For example: // +kubebuilder:subresource:scale:specpath=.spec.replica,statuspath=.status.replica,selectorpath=.spec.Label" + printColumnName = "name" + printColumnType = "type" + printColumnDescr = "description" + printColumnPath = "JSONPath" + printColumnFormat = "format" + printColumnPri = "priority" + printColumnError = "invalid printcolumn path. name, type, and JSONPath are required key-value pairs and the rest of the fields are optional. For example: // +kubebuilder:printcolumn:name=abc,type=string,JSONPath=status" +) + +// Options contains the parser options +type Options struct { + SkipMapValidation bool + + // SkipRBACValidation flag determines whether to check RBAC annotations + // for the controller or not at parse stage. + SkipRBACValidation bool +} + +// IsAPIResource returns true if either of the two conditions holds: +// 1. t has a +resource/+kubebuilder:resource comment tag +// 2. t has TypeMeta and ObjectMeta in its member list. +func IsAPIResource(t *types.Type) bool { + for _, c := range t.CommentLines { + if strings.Contains(c, "+resource") || strings.Contains(c, "+kubebuilder:resource") { + return true + } + } + + typeMetaFound, objMetaFound := false, false + for _, m := range t.Members { + if m.Name == "TypeMeta" && m.Type.String() == "k8s.io/apimachinery/pkg/apis/meta/v1.TypeMeta" { + typeMetaFound = true + } + if m.Name == "ObjectMeta" && m.Type.String() == "k8s.io/apimachinery/pkg/apis/meta/v1.ObjectMeta" { + objMetaFound = true + } + if typeMetaFound && objMetaFound { + return true + } + } + return false +} + +// IsNonNamespaced returns true if t has a +nonNamespaced comment tag +func IsNonNamespaced(t *types.Type) bool { + if !IsAPIResource(t) { + return false + } + + for _, c := range t.CommentLines { + if strings.Contains(c, "+genclient:nonNamespaced") { + return true + } + } + + for _, c := range t.SecondClosestCommentLines { + if strings.Contains(c, "+genclient:nonNamespaced") { + return true + } + } + + return false +} + +// IsController returns true if t has a +controller or +kubebuilder:controller tag +func IsController(t *types.Type) bool { + for _, c := range t.CommentLines { + if strings.Contains(c, "+controller") || strings.Contains(c, "+kubebuilder:controller") { + return true + } + } + return false +} + +// IsRBAC returns true if t has a +rbac or +kubebuilder:rbac tag +func IsRBAC(t *types.Type) bool { + for _, c := range t.CommentLines { + if strings.Contains(c, "+rbac") || strings.Contains(c, "+kubebuilder:rbac") { + return true + } + } + return false +}
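A minimal sketch of a type these heuristics would match; either the +kubebuilder:resource tag or the embedded TypeMeta/ObjectMeta members alone is sufficient for IsAPIResource (the Guestbook type and its group are hypothetical):

```go
package v1alpha1

import (
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

// Guestbook is a hypothetical example resource. IsAPIResource matches it twice
// over: once via the annotation below, and once via the embedded TypeMeta and
// ObjectMeta members (gengo reports embedded fields under their type name).
// +kubebuilder:resource:path=guestbooks,shortName=gb
type Guestbook struct {
	metav1.TypeMeta   `json:",inline"`
	metav1.ObjectMeta `json:"metadata,omitempty"`
}
```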
+// hasPrintColumn returns true if t has a +printcolumn or +kubebuilder:printcolumn annotation. +func hasPrintColumn(t *types.Type) bool { + for _, c := range t.CommentLines { + if strings.Contains(c, "+printcolumn") || strings.Contains(c, "+kubebuilder:printcolumn") { + return true + } + } + return false +} + +// IsInformer returns true if t has a +informers or +kubebuilder:informers tag +func IsInformer(t *types.Type) bool { + for _, c := range t.CommentLines { + if strings.Contains(c, "+informers") || strings.Contains(c, "+kubebuilder:informers") { + return true + } + } + return false +} + +// IsAPISubresource returns true if t has a +subresource-request comment tag +func IsAPISubresource(t *types.Type) bool { + for _, c := range t.CommentLines { + if strings.Contains(c, "+subresource-request") { + return true + } + } + return false +} + +// HasSubresource returns true if t is an APIResource with one or more Subresources +func HasSubresource(t *types.Type) bool { + if !IsAPIResource(t) { + return false + } + for _, c := range t.CommentLines { + if strings.Contains(c, "subresource") { + return true + } + } + return false +} + +// hasStatusSubresource returns true if t is an APIResource annotated with +// +kubebuilder:subresource:status +func hasStatusSubresource(t *types.Type) bool { + if !IsAPIResource(t) { + return false + } + for _, c := range t.CommentLines { + if strings.Contains(c, "+kubebuilder:subresource:status") { + return true + } + } + return false +} + +// hasScaleSubresource returns true if t is an APIResource annotated with +// +kubebuilder:subresource:scale +func hasScaleSubresource(t *types.Type) bool { + if !IsAPIResource(t) { + return false + } + for _, c := range t.CommentLines { + if strings.Contains(c, "+kubebuilder:subresource:scale") { + return true + } + } + return false +} + +// hasCategories returns true if t is an APIResource annotated with +// +kubebuilder:categories +func hasCategories(t *types.Type) bool { + if !IsAPIResource(t) { + return false + } + + for _, c := range t.CommentLines { + if strings.Contains(c, "+kubebuilder:categories") { + return true + } + } + return false +} + +// HasDocAnnotation returns true if t is an APIResource with doc annotation +// +kubebuilder:doc +func HasDocAnnotation(t *types.Type) bool { + if !IsAPIResource(t) { + return false + } + for _, c := range t.CommentLines { + if strings.Contains(c, "+kubebuilder:doc") { + return true + } + } + return false +} + +// hasSingular returns true if t is an APIResource annotated with +// +kubebuilder:singular +func hasSingular(t *types.Type) bool { + if !IsAPIResource(t) { + return false + } + for _, c := range t.CommentLines { + if strings.Contains(c, "+kubebuilder:singular") { + return true + } + } + return false +} + +// IsUnversioned returns true if t is in given group, and not in versioned path. +func IsUnversioned(t *types.Type, group string) bool { + return IsApisDir(filepath.Base(filepath.Dir(t.Name.Package))) && GetGroup(t) == group +} + +// IsVersioned returns true if t is in given group, and in versioned path. +func IsVersioned(t *types.Type, group string) bool { + dir := filepath.Base(filepath.Dir(filepath.Dir(t.Name.Package))) + return IsApisDir(dir) && GetGroup(t) == group +} + +// GetVersion returns version of t. +func GetVersion(t *types.Type, group string) string { + if !IsVersioned(t, group) { + panic(errors.Errorf("Cannot get version for unversioned type %v", t.Name)) + } + return filepath.Base(t.Name.Package) +}
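The group/version helpers above and below are pure path arithmetic on the Go package path. A standalone sketch of the same logic, with an illustrative package path:

```go
package main

import (
	"fmt"
	"path/filepath"
)

func main() {
	// For a versioned type in .../pkg/apis/<group>/<version>, the helpers
	// reduce to base-name arithmetic on the package path.
	pkg := "github.com/openshift/machine-api-operator/pkg/apis/healthchecking/v1alpha1"

	version := filepath.Base(pkg)                             // "v1alpha1"
	group := filepath.Base(filepath.Dir(pkg))                 // "healthchecking"
	apisDir := filepath.Base(filepath.Dir(filepath.Dir(pkg))) // "apis"

	fmt.Println(group, version, apisDir == "apis" || apisDir == "api")
}
```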
+// GetGroup returns group of t. +func GetGroup(t *types.Type) string { + return filepath.Base(GetGroupPackage(t)) +} + +// GetGroupPackage returns group package of t. +func GetGroupPackage(t *types.Type) string { + if IsApisDir(filepath.Base(filepath.Dir(t.Name.Package))) { + return t.Name.Package + } + return filepath.Dir(t.Name.Package) +} + +// GetKind returns kind of t. +func GetKind(t *types.Type, group string) string { + if !IsVersioned(t, group) && !IsUnversioned(t, group) { + panic(errors.Errorf("Cannot get kind for type not in group %v", t.Name)) + } + return t.Name.Name +} + +// IsApisDir returns true if a directory path is a Kubernetes api directory +func IsApisDir(dir string) bool { + return dir == "apis" || dir == "api" +} + +// Comments is a structure for using comment tags on go structs and fields +type Comments []string + +// getTag returns the value for the first comment with a prefix matching "+name=" +// e.g. "+name=foo\n+name=bar" would return "foo" +func (c Comments) getTag(name, sep string) string { + for _, c := range c { + prefix := fmt.Sprintf("+%s%s", name, sep) + if strings.HasPrefix(c, prefix) { + return strings.Replace(c, prefix, "", 1) + } + } + return "" +} + +// hasTag returns true if the Comments has a tag with the given name +func (c Comments) hasTag(name string) bool { + for _, c := range c { + prefix := fmt.Sprintf("+%s", name) + if strings.HasPrefix(c, prefix) { + return true + } + } + return false +} + +// getTags returns the values for all comments with a prefix and separator. E.g. for "name" and "=" +// "+name=foo\n+name=bar" would return []string{"foo", "bar"} +func (c Comments) getTags(name, sep string) []string { + tags := []string{} + for _, c := range c { + prefix := fmt.Sprintf("+%s%s", name, sep) + if strings.HasPrefix(c, prefix) { + tags = append(tags, strings.Replace(c, prefix, "", 1)) + } + } + return tags +} + +// getCategoriesTag returns the value of the +kubebuilder:categories tags +func getCategoriesTag(c *types.Type) string { + comments := Comments(c.CommentLines) + resource := comments.getTag("kubebuilder:categories", "=") + if len(resource) == 0 { + panic(errors.Errorf("Must specify +kubebuilder:categories comment for type %v", c.Name)) + } + return resource +} + +// getSingularName returns the value of the +kubebuilder:singular tag +func getSingularName(c *types.Type) string { + comments := Comments(c.CommentLines) + singular := comments.getTag("kubebuilder:singular", "=") + if len(singular) == 0 { + panic(errors.Errorf("Must specify a value to use with +kubebuilder:singular comment for type %v", c.Name)) + } + return singular +} + +// getDocAnnotation parses annotations of "+kubebuilder:doc:" with tags of "warning" or "note" to control doc generation. +// E.g. +kubebuilder:doc:warning=foo +kubebuilder:doc:note=bar +func getDocAnnotation(t *types.Type, tags ...string) map[string]string { + annotation := make(map[string]string) + for _, tag := range tags { + for _, c := range t.CommentLines { + prefix := fmt.Sprintf("+kubebuilder:doc:%s=", tag) + if strings.HasPrefix(c, prefix) { + annotation[tag] = strings.Replace(c, prefix, "", 1) + } + } + } + return annotation +} + +// parseByteValue returns the literal numeric values from a byte array +func parseByteValue(b []byte) string { + elem := strings.Join(strings.Fields(fmt.Sprintln(b)), ",") + elem = strings.TrimPrefix(elem, "[") + elem = strings.TrimSuffix(elem, "]") + return elem +}
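A small standalone sketch of the getTag semantics defined above; the comments slice stands in for gengo's CommentLines and the tag values are illustrative:

```go
package main

import (
	"fmt"
	"strings"
)

// getTag mirrors the helper above: return the value of the first comment
// beginning with "+<name><sep>".
func getTag(comments []string, name, sep string) string {
	prefix := fmt.Sprintf("+%s%s", name, sep)
	for _, c := range comments {
		if strings.HasPrefix(c, prefix) {
			return strings.Replace(c, prefix, "", 1)
		}
	}
	return ""
}

func main() {
	comments := []string{"+domain=example.com", "+kubebuilder:categories=all"}
	fmt.Println(getTag(comments, "domain", "="))                 // example.com
	fmt.Println(getTag(comments, "kubebuilder:categories", "=")) // all
}
```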
+// parseDescription parses the comments above each field in the type definition. +func parseDescription(res []string) string { + var temp strings.Builder + var desc string + for _, comment := range res { + if !(strings.Contains(comment, "+kubebuilder") || strings.Contains(comment, "+optional")) { + temp.WriteString(comment) + temp.WriteString(" ") + desc = strings.TrimRight(temp.String(), " ") + } + } + return desc +} + +// parseEnumToString returns a representative validated Go format string from the JSONSchemaProps schema +func parseEnumToString(value []v1beta1.JSON) string { + res := "[]v1beta1.JSON{" + prefix := "v1beta1.JSON{[]byte{" + for _, v := range value { + res = res + prefix + parseByteValue(v.Raw) + "}}," + } + return strings.TrimSuffix(res, ",") + "}" +} + +// check type of enum element value to match type of field +func checkType(props *v1beta1.JSONSchemaProps, s string, enums *[]v1beta1.JSON) { + + // TODO support more types check + switch props.Type { + case "int", "int64", "uint64": + if _, err := strconv.ParseInt(s, 0, 64); err != nil { + log.Fatalf("Invalid integer value [%v] for a field of integer type", s) + } + *enums = append(*enums, v1beta1.JSON{Raw: []byte(fmt.Sprintf("%v", s))}) + case "int32", "uint32": + if _, err := strconv.ParseInt(s, 0, 32); err != nil { + log.Fatalf("Invalid integer value [%v] for a field of integer32 type", s) + } + *enums = append(*enums, v1beta1.JSON{Raw: []byte(fmt.Sprintf("%v", s))}) + case "float", "float32": + if _, err := strconv.ParseFloat(s, 32); err != nil { + log.Fatalf("Invalid float value [%v] for a field of float32 type", s) + } + *enums = append(*enums, v1beta1.JSON{Raw: []byte(fmt.Sprintf("%v", s))}) + case "float64": + if _, err := strconv.ParseFloat(s, 64); err != nil { + log.Fatalf("Invalid float value [%v] for a field of float type", s) + } + *enums = append(*enums, v1beta1.JSON{Raw: []byte(fmt.Sprintf("%v", s))}) + case "string": + *enums = append(*enums, v1beta1.JSON{Raw: []byte(`"` + s + `"`)}) + } +} + +// The scale subresource requires specpath and statuspath key values (selectorpath is optional); they represent the JSONPaths of +// SpecReplicasPath, StatusReplicasPath, and LabelSelectorPath respectively. e.g. +// +kubebuilder:subresource:scale:specpath=.spec.replica,statuspath=.status.replica,selectorpath= +func parseScaleParams(t *types.Type) (map[string]string, error) { + jsonPath := make(map[string]string) + for _, c := range t.CommentLines { + if strings.Contains(c, "+kubebuilder:subresource:scale") { + paths := strings.Replace(c, "+kubebuilder:subresource:scale:", "", -1) + path := strings.Split(paths, ",") + if len(path) < 2 { + return nil, fmt.Errorf(jsonPathError) + } + for _, s := range path { + kv := strings.Split(s, "=") + if kv[0] == specReplicasPath || kv[0] == statusReplicasPath || kv[0] == labelSelectorPath { + jsonPath[kv[0]] = kv[1] + } else { + return nil, fmt.Errorf(jsonPathError) + } + } + var ok bool + _, ok = jsonPath[specReplicasPath] + if !ok { + return nil, fmt.Errorf(jsonPathError) + } + _, ok = jsonPath[statusReplicasPath] + if !ok { + return nil, fmt.Errorf(jsonPathError) + } + return jsonPath, nil + } + } + return nil, fmt.Errorf(jsonPathError) +}
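For concreteness, a standalone sketch of the annotation shape parseScaleParams accepts and the key/value split it performs; the JSONPaths are illustrative:

```go
package main

import (
	"fmt"
	"strings"
)

func main() {
	// A well-formed annotation: specpath and statuspath are mandatory,
	// selectorpath is optional.
	c := "+kubebuilder:subresource:scale:specpath=.spec.replicas,statuspath=.status.replicas,selectorpath=.status.labelSelector"

	// Same decomposition as parseScaleParams: strip the marker, split on
	// commas, then split each pair on "=".
	jsonPath := map[string]string{}
	paths := strings.Replace(c, "+kubebuilder:subresource:scale:", "", -1)
	for _, s := range strings.Split(paths, ",") {
		kv := strings.SplitN(s, "=", 2)
		jsonPath[kv[0]] = kv[1]
	}
	fmt.Println(jsonPath["specpath"], jsonPath["statuspath"], jsonPath["selectorpath"])
}
```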
+// printColumnKV parses key-value string formatted as "foo=bar" and returns key and value. +func printColumnKV(s string) (key, value string, err error) { + kv := strings.SplitN(s, "=", 2) + if len(kv) != 2 { + err = fmt.Errorf("invalid key value pair") + return key, value, err + } + key, value = kv[0], kv[1] + if strings.HasPrefix(value, "\"") && strings.HasSuffix(value, "\"") { + value = value[1 : len(value)-1] + } + return key, value, err +} + +// helperPrintColumn is a helper function for parsePrintColumnParams to compute printer columns. +func helperPrintColumn(parts string, comment string) (v1beta1.CustomResourceColumnDefinition, error) { + config := v1beta1.CustomResourceColumnDefinition{} + var count int + part := strings.Split(parts, ",") + if len(part) < 3 { + return v1beta1.CustomResourceColumnDefinition{}, fmt.Errorf(printColumnError) + } + + for _, elem := range strings.Split(parts, ",") { + key, value, err := printColumnKV(elem) + if err != nil { + return v1beta1.CustomResourceColumnDefinition{}, + fmt.Errorf("//+kubebuilder:printcolumn: tags must be key value pairs. Expected "+ + "keys [name=<name>,type=<type>,description=<descr>,format=<format>] "+ + "Got string: [%s]", parts) + } + if key == printColumnName || key == printColumnType || key == printColumnPath { + count++ + } + switch key { + case printColumnName: + config.Name = value + case printColumnType: + if value == "integer" || value == "number" || value == "string" || value == "boolean" || value == "date" { + config.Type = value + } else { + return v1beta1.CustomResourceColumnDefinition{}, fmt.Errorf("invalid value for %s printcolumn", printColumnType) + } + case printColumnFormat: + if config.Type == "integer" && (value == "int32" || value == "int64") { + config.Format = value + } else if config.Type == "number" && (value == "float" || value == "double") { + config.Format = value + } else if config.Type == "string" && (value == "byte" || value == "date" || value == "date-time" || value == "password") { + config.Format = value + } else { + return v1beta1.CustomResourceColumnDefinition{}, fmt.Errorf("invalid value for %s printcolumn", printColumnFormat) + } + case printColumnPath: + config.JSONPath = value + case printColumnPri: + i, err := strconv.Atoi(value) + v := int32(i) + if err != nil { + return v1beta1.CustomResourceColumnDefinition{}, fmt.Errorf("invalid value for %s printcolumn", printColumnPri) + } + config.Priority = v + case printColumnDescr: + config.Description = value + default: + return v1beta1.CustomResourceColumnDefinition{}, fmt.Errorf(printColumnError) + } + } + if count != 3 { + return v1beta1.CustomResourceColumnDefinition{}, fmt.Errorf(printColumnError) + } + return config, nil +} + +// printcolumn requires the name, type, and JSONPath fields; the rest of the fields are optional +// +kubebuilder:printcolumn:name=<name>,type=<type>,description=<desc>,JSONPath=<.spec.Name>,priority=<int32>,format=<format> +func parsePrintColumnParams(t *types.Type) ([]v1beta1.CustomResourceColumnDefinition, error) { + result := []v1beta1.CustomResourceColumnDefinition{} + for _, comment := range t.CommentLines { + if strings.Contains(comment, "+kubebuilder:printcolumn") { + parts := strings.Replace(comment, "+kubebuilder:printcolumn:", "", -1) + res, err := helperPrintColumn(parts, comment) + if err != nil { + return []v1beta1.CustomResourceColumnDefinition{}, err + } + result = append(result, res) + } + } + return result, nil +} diff --git a/vendor/sigs.k8s.io/controller-tools/pkg/internal/codegen/types.go b/vendor/sigs.k8s.io/controller-tools/pkg/internal/codegen/types.go new file mode 100644 index 
0000000000000000000000000000000000000000..ebfaf620d0f034368621cdea67aeda5d74f6d2a8 --- /dev/null +++ b/vendor/sigs.k8s.io/controller-tools/pkg/internal/codegen/types.go @@ -0,0 +1,213 @@ +/* +Copyright 2018 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package codegen + +import ( + "sort" + + rbacv1 "k8s.io/api/rbac/v1" + "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1" + "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime/schema" + "k8s.io/apimachinery/pkg/util/sets" + "k8s.io/gengo/types" +) + +// APIs is the information of a collection of API +type APIs struct { + // Domain is the domain portion of the group - e.g. k8s.io + Domain string + + // Package is the name of the root API package - e.g. github.com/my-org/my-repo/pkg/apis + Package string + + // Pkg the Package for the root API package + Pkg *types.Package + + // Groups is the list of API groups found under the apis package + Groups map[string]*APIGroup + + Rules []rbacv1.PolicyRule + + Informers map[v1.GroupVersionKind]bool +} + +// GetRules get rules of the APIs +func (apis *APIs) GetRules() []rbacv1.PolicyRule { + rules := []rbacv1.PolicyRule{} + rulesIndex := map[v1.GroupResource]sets.String{} + for _, rule := range apis.Rules { + for _, g := range rule.APIGroups { + for _, r := range rule.Resources { + gr := v1.GroupResource{ + Group: g, + Resource: r, + } + if _, found := rulesIndex[gr]; !found { + rulesIndex[gr] = sets.NewString() + } + rulesIndex[gr].Insert(rule.Verbs...) + } + } + } + for gr, v := range rulesIndex { + verbs := v.List() + sort.Strings(verbs) + rule := rbacv1.PolicyRule{ + Resources: []string{gr.Resource}, + APIGroups: []string{gr.Group}, + Verbs: verbs, + } + rules = append(rules, rule) + } + return rules +} + +// APIGroup contains information of an API group. +type APIGroup struct { + // Package is the name of the go package the api group is under - e.g. github.com/me/apiserver-helloworld/apis + Package string + // Domain is the domain portion of the group - e.g. k8s.io + Domain string + // Group is the short name of the group - e.g. mushroomkingdom + Group string + GroupTitle string + // Versions is the list of all versions for this group keyed by name + Versions map[string]*APIVersion + + UnversionedResources map[string]*APIResource + + // Structs is a list of unversioned definitions that must be generated + Structs []*Struct + Pkg *types.Package + PkgPath string +} + +// Struct contains information of a struct. +type Struct struct { + // Name is the name of the type + Name string + // genClient + GenClient bool + GenDeepCopy bool + NonNamespaced bool + + GenUnversioned bool + // Fields is the list of fields appearing in the struct + Fields []*Field +} + +// Field contains information of a field. 
+type Field struct { + // Name is the name of the field + Name string + // For versioned Kubernetes types, this is the versioned package + VersionedPackage string + // For versioned Kubernetes types, this is the unversioned package + UnversionedImport string + UnversionedType string +} + +// APIVersion contains information of an API version. +type APIVersion struct { + // Domain is the group domain - e.g. k8s.io + Domain string + // Group is the group name - e.g. mushroomkingdom + Group string + // Version is the api version - e.g. v1beta1 + Version string + // Resources is a list of resources appearing in the API version keyed by name + Resources map[string]*APIResource + // Pkg is the Package object from code-gen + Pkg *types.Package +} + +// APIResource contains information of an API resource. +type APIResource struct { + // Domain is the group domain - e.g. k8s.io + Domain string + // Group is the group name - e.g. mushroomkingdom + Group string + // Version is the api version - e.g. v1beta1 + Version string + // Kind is the resource name - e.g. PeachesCastle + Kind string + // Resource is the resource name - e.g. peachescastles + Resource string + // REST is the rest.Storage implementation used to handle requests + // This field is optional. The standard REST implementation will be used + // by default. + REST string + // Subresources is a map of subresources keyed by name + Subresources map[string]*APISubresource + // Type is the Type object from code-gen + Type *types.Type + // Strategy is the name of the struct to use for the strategy + Strategy string + // StatusStrategy is the name of the struct to use for the status strategy + StatusStrategy string + // NonNamespaced indicates that the resource kind is non namespaced + NonNamespaced bool + + ShortName string + + JSONSchemaProps v1beta1.JSONSchemaProps + CRD v1beta1.CustomResourceDefinition + Validation string + ValidationComments string + // DocAnnotation is a map of annotations by name for doc. e.g. warning, notes message + DocAnnotation map[string]string + // Categories is a list of categories the resource is part of. + Categories []string +} + +// APISubresource contains information of an API subresource. +type APISubresource struct { + // Domain is the group domain - e.g. k8s.io + Domain string + // Group is the group name - e.g. mushroomkingdom + Group string + // Version is the api version - e.g. v1beta1 + Version string + // Kind is the resource name - e.g. PeachesCastle + Kind string + // Resource is the resource name - e.g. peachescastles + Resource string + // Request is the subresource request type - e.g. ScaleCastle + Request string + // REST is the rest.Storage implementation used to handle requests + REST string + // Path is the subresource path - e.g. scale + Path string + + // ImportPackage is the import statement that must appear for the Request + ImportPackage string + + // RequestType is the type of the request + RequestType *types.Type + + // RESTType is the type of the request handler + RESTType *types.Type +} + +// Controller contains information of a controller. 
+type Controller struct { + Target schema.GroupVersionKind + Resource string + Pkg *types.Package + Repo string +} diff --git a/vendor/sigs.k8s.io/controller-tools/pkg/internal/general/util.go b/vendor/sigs.k8s.io/controller-tools/pkg/internal/general/util.go new file mode 100644 index 0000000000000000000000000000000000000000..afa889e36a0a05035fcd40b4093a42ebd45538ca --- /dev/null +++ b/vendor/sigs.k8s.io/controller-tools/pkg/internal/general/util.go @@ -0,0 +1,102 @@ +/* +Copyright 2018 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package general + +import ( + "fmt" + "go/ast" + "go/parser" + "go/token" + "os" + "path/filepath" + "strings" +) + +// isGoFile filters files from parsing. +func isGoFile(f os.FileInfo) bool { + // ignore non-Go or Go test files + name := f.Name() + return !f.IsDir() && + !strings.HasPrefix(name, ".") && + !strings.HasSuffix(name, "_test.go") && + strings.HasSuffix(name, ".go") +} + +// GetAnnotation extracts the annotation from comment text. +// It will return "foo" for comment "+kubebuilder:webhook:foo" . +func GetAnnotation(c, name string) string { + prefix := fmt.Sprintf("+%s:", name) + if strings.HasPrefix(c, prefix) { + return strings.TrimPrefix(c, prefix) + } + return "" +} + +// ParseKV parses key-value string formatted as "foo=bar" and returns key and value. +func ParseKV(s string) (key, value string, err error) { + kv := strings.Split(s, "=") + if len(kv) != 2 { + err = fmt.Errorf("invalid key value pair") + return key, value, err + } + key, value = kv[0], kv[1] + if strings.HasPrefix(value, "\"") && strings.HasSuffix(value, "\"") { + value = value[1 : len(value)-1] + } + return key, value, err +} + +// ParseDir parses the Go files under given directory and parses the annotation by +// invoking the parseFn function on each comment group (multi-lines comments). +// TODO(droot): extend it to multiple dirs +func ParseDir(dir string, parseFn func(string) error) error { + fset := token.NewFileSet() + + err := filepath.Walk(dir, + func(path string, info os.FileInfo, _ error) error { + if !isGoFile(info) { + // TODO(droot): enable this output based on verbose flag + // fmt.Println("skipping non-go file", path) + return nil + } + return ParseFile(fset, path, nil, parseFn) + }) + return err +} + +// ParseFile parses given filename or content src and parses annotations by +// invoking the parseFn function on each comment group (multi-lines comments). +func ParseFile(fset *token.FileSet, filename string, src interface{}, parseFn func(string) error) error { + f, err := parser.ParseFile(fset, filename, src, parser.ParseComments) + if err != nil { + fmt.Printf("error from parse.ParseFile: %v", err) + return err + } + + // using commentMaps here because it sanitizes the comment text by removing + // comment markers, compresses newlines etc. 
+ cmap := ast.NewCommentMap(fset, f, f.Comments) + + for _, commentGroup := range cmap.Comments() { + err = parseFn(commentGroup.Text()) + if err != nil { + fmt.Print("error when parsing annotation") + return err + } + } + return nil +} diff --git a/vendor/sigs.k8s.io/controller-tools/pkg/rbac/manifests.go b/vendor/sigs.k8s.io/controller-tools/pkg/rbac/manifests.go new file mode 100644 index 0000000000000000000000000000000000000000..629480ec1b5ce13b590da687e4e5bf366f52bbc2 --- /dev/null +++ b/vendor/sigs.k8s.io/controller-tools/pkg/rbac/manifests.go @@ -0,0 +1,170 @@ +/* +Copyright 2018 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package rbac + +import ( + "fmt" + "io/ioutil" + "os" + "path/filepath" + + "github.com/ghodss/yaml" + rbacv1 "k8s.io/api/rbac/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "sigs.k8s.io/controller-tools/pkg/internal/general" +) + +// ManifestOptions represent options for generating the RBAC manifests. +type ManifestOptions struct { + InputDir string + OutputDir string + RoleFile string + BindingFile string + Name string + ServiceAccount string + Namespace string + Labels map[string]string +} + +// SetDefaults sets up the default options for RBAC Manifest generator. +func (o *ManifestOptions) SetDefaults() { + o.Name = "manager" + o.InputDir = filepath.Join(".", "pkg") + o.OutputDir = filepath.Join(".", "config", "rbac") + o.ServiceAccount = "default" + o.Namespace = "system" +} + +// RoleName returns the RBAC role name to be used in the manifests. +func (o *ManifestOptions) RoleName() string { + return o.Name + "-role" +} + +// RoleFileName returns the name of the manifest file to use for the role. +func (o *ManifestOptions) RoleFileName() string { + if len(o.RoleFile) == 0 { + return o.Name + "_role.yaml" + } + // TODO: validate file name + return o.RoleFile +} + +// RoleBindingName returns the RBAC role binding name to be used in the manifests. +func (o *ManifestOptions) RoleBindingName() string { + return o.Name + "-rolebinding" +} + +// RoleBindingFileName returns the name of the manifest file to use for the role binding. +func (o *ManifestOptions) RoleBindingFileName() string { + if len(o.BindingFile) == 0 { + return o.Name + "_role_binding.yaml" + } + // TODO: validate file name + return o.BindingFile +} + +// Validate validates the input options. +func (o *ManifestOptions) Validate() error { + if _, err := os.Stat(o.InputDir); err != nil { + return fmt.Errorf("invalid input directory '%s' %v", o.InputDir, err) + } + return nil +} + +// Generate generates RBAC manifests by parsing the RBAC annotations in Go source +// files specified in the input directory. 
+func Generate(o *ManifestOptions) error {
+	if err := o.Validate(); err != nil {
+		return err
+	}
+
+	ops := parserOptions{
+		rules: []rbacv1.PolicyRule{},
+	}
+	err := general.ParseDir(o.InputDir, ops.parseAnnotation)
+	if err != nil {
+		return fmt.Errorf("failed to parse the input dir: %v", err)
+	}
+	if len(ops.rules) == 0 {
+		return nil
+	}
+	roleManifest, err := getClusterRoleManifest(ops.rules, o)
+	if err != nil {
+		return fmt.Errorf("failed to generate role manifest: %v", err)
+	}
+
+	roleBindingManifest, err := getClusterRoleBindingManifest(o)
+	if err != nil {
+		return fmt.Errorf("failed to generate role binding manifests: %v", err)
+	}
+
+	err = os.MkdirAll(o.OutputDir, os.ModePerm)
+	if err != nil {
+		return fmt.Errorf("failed to create output dir: %v", err)
+	}
+	roleManifestFile := filepath.Join(o.OutputDir, o.RoleFileName())
+	if err := ioutil.WriteFile(roleManifestFile, roleManifest, 0666); err != nil {
+		return fmt.Errorf("failed to write role manifest YAML file: %v", err)
+	}
+
+	roleBindingManifestFile := filepath.Join(o.OutputDir, o.RoleBindingFileName())
+	if err := ioutil.WriteFile(roleBindingManifestFile, roleBindingManifest, 0666); err != nil {
+		return fmt.Errorf("failed to write role binding manifest YAML file: %v", err)
+	}
+	return nil
+}
+
+func getClusterRoleManifest(rules []rbacv1.PolicyRule, o *ManifestOptions) ([]byte, error) {
+	role := rbacv1.ClusterRole{
+		TypeMeta: metav1.TypeMeta{
+			Kind:       "ClusterRole",
+			APIVersion: "rbac.authorization.k8s.io/v1",
+		},
+		ObjectMeta: metav1.ObjectMeta{
+			Name:   o.RoleName(),
+			Labels: o.Labels,
+		},
+		Rules: rules,
+	}
+	return yaml.Marshal(role)
+}
+
+func getClusterRoleBindingManifest(o *ManifestOptions) ([]byte, error) {
+	rolebinding := &rbacv1.ClusterRoleBinding{
+		TypeMeta: metav1.TypeMeta{
+			APIVersion: "rbac.authorization.k8s.io/v1",
+			Kind:       "ClusterRoleBinding",
+		},
+		ObjectMeta: metav1.ObjectMeta{
+			Name:   o.RoleBindingName(),
+			Labels: o.Labels,
+		},
+		Subjects: []rbacv1.Subject{
+			{
+				Name:      o.ServiceAccount,
+				Namespace: o.Namespace,
+				Kind:      "ServiceAccount",
+			},
+		},
+		RoleRef: rbacv1.RoleRef{
+			Name:     o.RoleName(),
+			Kind:     "ClusterRole",
+			APIGroup: "rbac.authorization.k8s.io",
+		},
+	}
+	return yaml.Marshal(rolebinding)
+}
diff --git a/vendor/sigs.k8s.io/controller-tools/pkg/rbac/parser.go b/vendor/sigs.k8s.io/controller-tools/pkg/rbac/parser.go
new file mode 100644
index 0000000000000000000000000000000000000000..d28cfd9e67323bd41659e82287f5f2cca44b8a32
--- /dev/null
+++ b/vendor/sigs.k8s.io/controller-tools/pkg/rbac/parser.go
@@ -0,0 +1,83 @@
+/*
+Copyright 2018 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Package rbac contains libraries for generating RBAC manifests from RBAC
+// annotations in Go source files.
+package rbac
+
+import (
+	"log"
+	"strings"
+
+	rbacv1 "k8s.io/api/rbac/v1"
+	"sigs.k8s.io/controller-tools/pkg/internal/general"
+)
+
+type parserOptions struct {
+	rules []rbacv1.PolicyRule
+}
+
+// parseAnnotation parses RBAC annotations.
+func (o *parserOptions) parseAnnotation(commentText string) error {
+	for _, comment := range strings.Split(commentText, "\n") {
+		comment := strings.TrimSpace(comment)
+		if strings.HasPrefix(comment, "+rbac") {
+			if ann := general.GetAnnotation(comment, "rbac"); ann != "" {
+				o.rules = append(o.rules, parseRBACTag(ann))
+			}
+		}
+		if strings.HasPrefix(comment, "+kubebuilder:rbac") {
+			if ann := general.GetAnnotation(comment, "kubebuilder:rbac"); ann != "" {
+				o.rules = append(o.rules, parseRBACTag(ann))
+			}
+		}
+	}
+	return nil
+}
+
+// parseRBACTag parses the given RBAC annotation into an RBAC PolicyRule.
+// This is copied from Kubebuilder code.
+func parseRBACTag(tag string) rbacv1.PolicyRule {
+	result := rbacv1.PolicyRule{}
+	for _, elem := range strings.Split(tag, ",") {
+		key, value, err := general.ParseKV(elem)
+		if err != nil {
+			log.Fatalf("// +kubebuilder:rbac: tags must be key value pairs. Expected "+
+				"keys [groups=<group1;group2>,resources=<resource1;resource2>,verbs=<verb1;verb2>] "+
+				"Got string: [%s]", tag)
+		}
+		values := strings.Split(value, ";")
+		switch key {
+		case "groups":
+			normalized := []string{}
+			for _, v := range values {
+				if v == "core" {
+					normalized = append(normalized, "")
+				} else {
+					normalized = append(normalized, v)
+				}
+			}
+			result.APIGroups = normalized
+		case "resources":
+			result.Resources = values
+		case "verbs":
+			result.Verbs = values
+		case "urls":
+			result.NonResourceURLs = values
+		}
+	}
+	return result
+}
diff --git a/vendor/sigs.k8s.io/controller-tools/pkg/util/util.go b/vendor/sigs.k8s.io/controller-tools/pkg/util/util.go
new file mode 100644
index 0000000000000000000000000000000000000000..9649913b389b1ecf797879c47e18c9d4d8032c1a
--- /dev/null
+++ b/vendor/sigs.k8s.io/controller-tools/pkg/util/util.go
@@ -0,0 +1,77 @@
+/*
+Copyright 2018 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package util
+
+import (
+	"fmt"
+	"io"
+	"log"
+	"os"
+	"path/filepath"
+
+	"github.com/spf13/afero"
+)
+
+// FileWriter is an io wrapper to write files.
+type FileWriter struct {
+	Fs afero.Fs
+}
+
+// WriteCloser returns an io.Writer to write to the given path.
+func (fw *FileWriter) WriteCloser(path string) (io.Writer, error) {
+	if fw.Fs == nil {
+		fw.Fs = afero.NewOsFs()
+	}
+	dir := filepath.Dir(path)
+	err := fw.Fs.MkdirAll(dir, 0700)
+	if err != nil {
+		return nil, err
+	}
+
+	fi, err := fw.Fs.OpenFile(path, os.O_WRONLY|os.O_CREATE|os.O_TRUNC, 0600)
+	if err != nil {
+		return nil, err
+	}
+
+	return fi, nil
+}
+
+// WriteFile writes the given content to the file path.
+func (fw *FileWriter) WriteFile(filePath string, content []byte) error {
+	if fw.Fs == nil {
+		fw.Fs = afero.NewOsFs()
+	}
+	f, err := fw.WriteCloser(filePath)
+	if err != nil {
+		return fmt.Errorf("failed to create %s: %v", filePath, err)
+	}
+
+	if c, ok := f.(io.Closer); ok {
+		defer func() {
+			if err := c.Close(); err != nil {
+				log.Fatal(err)
+			}
+		}()
+	}
+
+	_, err = f.Write(content)
+	if err != nil {
+		return fmt.Errorf("failed to write %s: %v", filePath, err)
+	}
+
+	return nil
+}
diff --git a/vendor/sigs.k8s.io/controller-tools/pkg/webhook/admission.go b/vendor/sigs.k8s.io/controller-tools/pkg/webhook/admission.go
new file mode 100644
index 0000000000000000000000000000000000000000..13bd1bca43839ba9261880815cf7bae2a432d9a8
--- /dev/null
+++ b/vendor/sigs.k8s.io/controller-tools/pkg/webhook/admission.go
@@ -0,0 +1,89 @@
+/*
+Copyright 2018 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package webhook
+
+import (
+	"errors"
+	"fmt"
+	"regexp"
+	"strings"
+
+	admissionregistrationv1beta1 "k8s.io/api/admissionregistration/v1beta1"
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+)
+
+// admissionWebhook contains the bits needed for generating an admission webhook configuration
+type admissionWebhook struct {
+	// name is the name of the webhook
+	name string
+	// typ is the webhook type, i.e. mutating, validating
+	typ webhookType
+	// path is the path this webhook will serve.
+	path string
+	// rules maps to the rules field in admissionregistrationv1beta1.admissionWebhook
+	rules []admissionregistrationv1beta1.RuleWithOperations
+	// failurePolicy maps to the failurePolicy field in admissionregistrationv1beta1.admissionWebhook
+	// This is optional. If not set, it will be defaulted to Ignore (fail-open) by the server.
+	// More details: https://github.com/kubernetes/api/blob/f5c295feaba2cbc946f0bbb8b535fc5f6a0345ee/admissionregistration/v1beta1/types.go#L144-L147
+	failurePolicy *admissionregistrationv1beta1.FailurePolicyType
+	// namespaceSelector maps to the namespaceSelector field in admissionregistrationv1beta1.admissionWebhook
+	// This is optional.
+	namespaceSelector *metav1.LabelSelector
+}
+
+func (w *admissionWebhook) setDefaults() {
+	if len(w.path) == 0 {
+		if len(w.rules) == 0 || len(w.rules[0].Resources) == 0 {
+			// can't do defaulting, skip it.
+			return
+		}
+		if w.typ == mutatingWebhook {
+			w.path = "/mutate-" + w.rules[0].Resources[0]
+		} else if w.typ == validatingWebhook {
+			w.path = "/validate-" + w.rules[0].Resources[0]
+		}
+	}
+	if len(w.name) == 0 {
+		reg := regexp.MustCompile("[^a-zA-Z0-9]+")
+		processedPath := strings.ToLower(reg.ReplaceAllString(w.path, ""))
+		w.name = processedPath + ".example.com"
+	}
+}
+
+var _ webhook = &admissionWebhook{}
+
+// GetType returns the type of the webhook.
+func (w *admissionWebhook) GetType() webhookType {
+	return w.typ
+}
+
+// Validate validates if the webhook is valid.
+func (w *admissionWebhook) Validate() error {
+	if len(w.rules) == 0 {
+		return errors.New("field rules should not be empty")
+	}
+	if len(w.name) == 0 {
+		return errors.New("field name should not be empty")
+	}
+	if w.typ != mutatingWebhook && w.typ != validatingWebhook {
+		return fmt.Errorf("unsupported type: %v, only mutatingWebhook and validatingWebhook are supported", w.typ)
+	}
+	if len(w.path) == 0 {
+		return errors.New("field path should not be empty")
+	}
+	return nil
+}
diff --git a/vendor/sigs.k8s.io/controller-tools/pkg/webhook/generator.go b/vendor/sigs.k8s.io/controller-tools/pkg/webhook/generator.go
new file mode 100644
index 0000000000000000000000000000000000000000..838ab6c681c9994abb95353a6d72ecb8bf6ccfc7
--- /dev/null
+++ b/vendor/sigs.k8s.io/controller-tools/pkg/webhook/generator.go
@@ -0,0 +1,334 @@
+/*
+Copyright 2018 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package webhook
+
+import (
+	"errors"
+	"net"
+	"net/url"
+	"path"
+	"sort"
+	"strconv"
+
+	"k8s.io/api/admissionregistration/v1beta1"
+	admissionregistration "k8s.io/api/admissionregistration/v1beta1"
+	corev1 "k8s.io/api/core/v1"
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	"k8s.io/apimachinery/pkg/runtime"
+	apitypes "k8s.io/apimachinery/pkg/types"
+	"k8s.io/apimachinery/pkg/util/intstr"
+)
+
+type generatorOptions struct {
+	// webhooks maps a path to a webhook.
+	webhooks map[string]webhook
+
+	// port is the port number that the server will serve on.
+	// It will be defaulted to 443 if unspecified.
+	port int32
+
+	// certDir is the directory that contains the server key and certificate.
+	certDir string
+
+	// mutatingWebhookConfigName is the name that is used for creating the MutatingWebhookConfiguration object.
+	mutatingWebhookConfigName string
+	// validatingWebhookConfigName is the name that is used for creating the ValidatingWebhookConfiguration object.
+	validatingWebhookConfigName string
+
+	// secret is the location for storing the certificate for the admission server.
+	// The server should have permission to create a secret in the namespace.
+	secret *apitypes.NamespacedName
+
+	// service is a k8s service fronting the webhook server pod(s).
+	// One and only one of service and host can be set.
+	// This maps to the field .Webhooks.ClientConfig.Service
+	// https://github.com/kubernetes/api/blob/183f3326a9353bd6d41430fc80f96259331d029c/admissionregistration/v1beta1/types.go#L260
+	service *service
+	// host is the host name of .Webhooks.ClientConfig.URL
+	// https://github.com/kubernetes/api/blob/183f3326a9353bd6d41430fc80f96259331d029c/admissionregistration/v1beta1/types.go#L250
+	// One and only one of service and host can be set.
+	// If neither service nor host is specified, host will be defaulted to "localhost".
+	host *string
+}
+
+// service contains information for creating a Service
+type service struct {
+	// name of the Service
+	name string
+	// namespace of the Service
+	namespace string
+	// selectors is the selector of the Service.
+	// This must select the pods that run this webhook server.
+	selectors map[string]string
+}
+
+// setDefaults does defaulting for the generatorOptions.
+func (o *generatorOptions) setDefaults() {
+	if o.webhooks == nil {
+		o.webhooks = map[string]webhook{}
+	}
+	if o.port <= 0 {
+		o.port = 443
+	}
+	if len(o.certDir) == 0 {
+		o.certDir = path.Join("/tmp", "k8s-webhook-server", "serving-certs")
+	}
+
+	if len(o.mutatingWebhookConfigName) == 0 {
+		o.mutatingWebhookConfigName = "mutating-webhook-configuration"
+	}
+	if len(o.validatingWebhookConfigName) == 0 {
+		o.validatingWebhookConfigName = "validating-webhook-configuration"
+	}
+	if o.host == nil && o.service == nil {
+		varString := "localhost"
+		o.host = &varString
+	}
+}
+
+// Generate creates the AdmissionWebhookConfiguration objects and the Service, if any.
+// It also provisions the certificate for the admission server.
+func (o *generatorOptions) Generate() ([]runtime.Object, error) {
+	// do defaulting if necessary
+	o.setDefaults()
+
+	webhookConfigurations, err := o.whConfigs()
+	if err != nil {
+		return nil, err
+	}
+	objects := webhookConfigurations
+	// only append the fronting Service when one is configured; getService
+	// returns nil otherwise, and a nil entry would serialize as "null".
+	if svc := o.getService(); svc != nil {
+		objects = append(objects, svc)
+	}
+
+	return objects, nil
+}
+
+// whConfigs creates a mutatingWebhookConfiguration and/or a validatingWebhookConfiguration.
+func (o *generatorOptions) whConfigs() ([]runtime.Object, error) {
+	for _, webhook := range o.webhooks {
+		if err := webhook.Validate(); err != nil {
+			return nil, err
+		}
+	}
+
+	objs := []runtime.Object{}
+	mutatingWH, err := o.mutatingWHConfig()
+	if err != nil {
+		return nil, err
+	}
+	if mutatingWH != nil {
+		objs = append(objs, mutatingWH)
+	}
+	validatingWH, err := o.validatingWHConfigs()
+	if err != nil {
+		return nil, err
+	}
+	if validatingWH != nil {
+		objs = append(objs, validatingWH)
+	}
+	return objs, nil
+}
+
+// mutatingWHConfig creates a mutatingWebhookConfiguration.
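+// The returned configuration is named after mutatingWebhookConfigName,
+// collects every registered webhook of the mutating type sorted by name, and
+// is nil when no mutating webhook is registered.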
+func (o *generatorOptions) mutatingWHConfig() (runtime.Object, error) {
+	mutatingWebhooks := []v1beta1.Webhook{}
+	for path, webhook := range o.webhooks {
+		if webhook.GetType() != mutatingWebhook {
+			continue
+		}
+
+		aw := webhook.(*admissionWebhook)
+		wh, err := o.admissionWebhook(path, aw)
+		if err != nil {
+			return nil, err
+		}
+		mutatingWebhooks = append(mutatingWebhooks, *wh)
+	}
+
+	sort.Slice(mutatingWebhooks, func(i, j int) bool {
+		return mutatingWebhooks[i].Name < mutatingWebhooks[j].Name
+	})
+
+	if len(mutatingWebhooks) > 0 {
+		return &admissionregistration.MutatingWebhookConfiguration{
+			TypeMeta: metav1.TypeMeta{
+				APIVersion: metav1.GroupVersion{Group: admissionregistration.GroupName, Version: "v1beta1"}.String(),
+				Kind:       "MutatingWebhookConfiguration",
+			},
+			ObjectMeta: metav1.ObjectMeta{
+				Name: o.mutatingWebhookConfigName,
+				Annotations: map[string]string{
+					// TODO(DirectXMan12): Change the annotation to the format that cert-manager decides to use.
+					"alpha.admissionwebhook.cert-manager.io": "true",
+				},
+			},
+			Webhooks: mutatingWebhooks,
+		}, nil
+	}
+	return nil, nil
+}
+
+func (o *generatorOptions) validatingWHConfigs() (runtime.Object, error) {
+	validatingWebhooks := []v1beta1.Webhook{}
+	for path, webhook := range o.webhooks {
+		if webhook.GetType() != validatingWebhook {
+			continue
+		}
+
+		aw := webhook.(*admissionWebhook)
+		wh, err := o.admissionWebhook(path, aw)
+		if err != nil {
+			return nil, err
+		}
+		validatingWebhooks = append(validatingWebhooks, *wh)
+	}
+
+	sort.Slice(validatingWebhooks, func(i, j int) bool {
+		return validatingWebhooks[i].Name < validatingWebhooks[j].Name
+	})
+
+	if len(validatingWebhooks) > 0 {
+		return &admissionregistration.ValidatingWebhookConfiguration{
+			TypeMeta: metav1.TypeMeta{
+				APIVersion: metav1.GroupVersion{Group: admissionregistration.GroupName, Version: "v1beta1"}.String(),
+				Kind:       "ValidatingWebhookConfiguration",
+			},
+			ObjectMeta: metav1.ObjectMeta{
+				Name: o.validatingWebhookConfigName,
+				Annotations: map[string]string{
+					// TODO(DirectXMan12): Change the annotation to the format that cert-manager decides to use.
+					"alpha.admissionwebhook.cert-manager.io": "true",
+				},
+			},
+			Webhooks: validatingWebhooks,
+		}, nil
+	}
+	return nil, nil
+}
+
+func (o *generatorOptions) admissionWebhook(path string, wh *admissionWebhook) (*admissionregistration.Webhook, error) {
+	if wh.namespaceSelector == nil && o.service != nil && len(o.service.namespace) > 0 {
+		wh.namespaceSelector = &metav1.LabelSelector{
+			MatchExpressions: []metav1.LabelSelectorRequirement{
+				{
+					Key:      "control-plane",
+					Operator: metav1.LabelSelectorOpDoesNotExist,
+				},
+			},
+		}
+	}
+
+	webhook := &admissionregistration.Webhook{
+		Name:              wh.name,
+		Rules:             wh.rules,
+		FailurePolicy:     wh.failurePolicy,
+		NamespaceSelector: wh.namespaceSelector,
+	}
+	cc, err := o.getClientConfigWithPath(path)
+	if err != nil {
+		return nil, err
+	}
+	webhook.ClientConfig = *cc
+	return webhook, nil
+}
+
+// getClientConfigWithPath constructs a WebhookClientConfig based on the server generatorOptions.
+// It will use path to set the path in WebhookClientConfig.
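+// For example (hypothetical values): with host "localhost", port 443 and path
+// "/mutate-deployments", the client config URL becomes
+// "https://localhost:443/mutate-deployments"; with a service configured
+// instead, the same path is stored in Service.Path.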
+func (o *generatorOptions) getClientConfigWithPath(path string) (*admissionregistration.WebhookClientConfig, error) {
+	cc, err := o.getClientConfig()
+	if err != nil {
+		return nil, err
+	}
+	return cc, setPath(cc, path)
+}
+
+func (o *generatorOptions) getClientConfig() (*admissionregistration.WebhookClientConfig, error) {
+	if o.host != nil && o.service != nil {
+		return nil, errors.New("URL and service can't be set at the same time")
+	}
+	cc := &admissionregistration.WebhookClientConfig{
+		// Put a non-empty and not harmful CABundle here as a placeholder;
+		// the real CA bundle is expected to be injected later (e.g. by
+		// cert-manager). Leaving it empty would cause the field to be
+		// dropped when the config is serialized.
+		CABundle: []byte(`\n`),
+	}
+	if o.host != nil {
+		u := url.URL{
+			Scheme: "https",
+			Host:   net.JoinHostPort(*o.host, strconv.Itoa(int(o.port))),
+		}
+		urlString := u.String()
+		cc.URL = &urlString
+	}
+	if o.service != nil {
+		cc.Service = &admissionregistration.ServiceReference{
+			Name:      o.service.name,
+			Namespace: o.service.namespace,
+			// Path will be set later
+		}
+	}
+	return cc, nil
+}
+
+// setPath sets the path in the WebhookClientConfig.
+func setPath(cc *admissionregistration.WebhookClientConfig, path string) error {
+	if cc.URL != nil {
+		u, err := url.Parse(*cc.URL)
+		if err != nil {
+			return err
+		}
+		u.Path = path
+		urlString := u.String()
+		cc.URL = &urlString
+	}
+	if cc.Service != nil {
+		cc.Service.Path = &path
+	}
+	return nil
+}
+
+// getService creates a corev1.Service object fronting the admission server.
+func (o *generatorOptions) getService() runtime.Object {
+	if o.service == nil {
+		return nil
+	}
+	svc := &corev1.Service{
+		TypeMeta: metav1.TypeMeta{
+			APIVersion: "v1",
+			Kind:       "Service",
+		},
+		ObjectMeta: metav1.ObjectMeta{
+			Name:      o.service.name,
+			Namespace: o.service.namespace,
+			Annotations: map[string]string{
+				// The secret here only needs a name, since it will be in the same namespace as the service.
+				// TODO(DirectXMan12): Change the annotation to the format that cert-manager decides to use.
+				"alpha.service.cert-manager.io/serving-cert-secret-name": o.secret.Name,
+			},
+		},
+		Spec: corev1.ServiceSpec{
+			Selector: o.service.selectors,
+			Ports: []corev1.ServicePort{
+				{
+					// When using a service, kube-apiserver will send admission requests to port 443.
+					Port:       443,
+					TargetPort: intstr.IntOrString{Type: intstr.Int, IntVal: o.port},
+				},
+			},
+		},
+	}
+	return svc
+}
diff --git a/vendor/sigs.k8s.io/controller-tools/pkg/webhook/manifests.go b/vendor/sigs.k8s.io/controller-tools/pkg/webhook/manifests.go
new file mode 100644
index 0000000000000000000000000000000000000000..3bea5853e07c28a578940c710190a985e8d20103
--- /dev/null
+++ b/vendor/sigs.k8s.io/controller-tools/pkg/webhook/manifests.go
@@ -0,0 +1,151 @@
+/*
+Copyright 2018 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package webhook
+
+import (
+	"bytes"
+	"fmt"
+	"path"
+	"strings"
+	"text/template"
+
+	"github.com/ghodss/yaml"
+	"github.com/spf13/afero"
+
+	"sigs.k8s.io/controller-tools/pkg/internal/general"
+)
+
+// Options represent options for generating the webhook manifests.
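+//
+// For example (illustrative values only), a comment group such as
+//
+//	// +kubebuilder:webhook:groups=apps,versions=v1,resources=deployments,verbs=create;update,type=mutating,name=mdeployment.example.com,path=/mutate-deployments,failure-policy=Fail
+//	// +kubebuilder:webhook:port=7890,cert-dir=/tmp/certs,service=system:webhook-service,selector=app:webhook-server,secret=system:webhook-secret
+//
+// registers one mutating webhook and sets the server-level options.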
+type Options struct {
+	// WriterOptions specifies the input and output
+	WriterOptions
+
+	generatorOptions
+}
+
+// Generate generates webhook manifests by parsing the webhook annotations in Go source
+// files specified in the input directory.
+func Generate(o *Options) error {
+	if err := o.WriterOptions.Validate(); err != nil {
+		return err
+	}
+
+	err := general.ParseDir(o.InputDir, o.parseAnnotation)
+	if err != nil {
+		return fmt.Errorf("failed to parse the input dir: %v", err)
+	}
+
+	if len(o.webhooks) == 0 {
+		return nil
+	}
+
+	objs, err := o.Generate()
+	if err != nil {
+		return err
+	}
+
+	err = o.WriteObjectsToDisk(objs...)
+	if err != nil {
+		return err
+	}
+
+	return o.controllerManagerPatch()
+}
+
+func (o *Options) controllerManagerPatch() error {
+	var kustomizeLabelPatch = `apiVersion: apps/v1
+kind: StatefulSet
+metadata:
+  name: controller-manager
+spec:
+  template:
+    metadata:
+{{- with .Labels }}
+      labels:
+{{ toYaml . | indent 8 }}
+{{- end }}
+    spec:
+      containers:
+      - name: manager
+        ports:
+        - containerPort: {{ .Port }}
+          name: webhook-server
+          protocol: TCP
+        volumeMounts:
+        - mountPath: {{ .CertDir }}
+          name: cert
+          readOnly: true
+      volumes:
+      - name: cert
+        secret:
+          defaultMode: 420
+          secretName: {{ .SecretName }}
+`
+
+	type KustomizeLabelPatch struct {
+		Labels     map[string]string
+		SecretName string
+		Port       int32
+		CertDir    string
+	}
+
+	p := KustomizeLabelPatch{
+		Labels:     o.service.selectors,
+		SecretName: o.secret.Name,
+		Port:       o.port,
+		CertDir:    o.certDir,
+	}
+	funcMap := template.FuncMap{
+		"toYaml": toYAML,
+		"indent": indent,
+	}
+	temp, err := template.New("kustomizeLabelPatch").Funcs(funcMap).Parse(kustomizeLabelPatch)
+	if err != nil {
+		return err
+	}
+	buf := bytes.NewBuffer(nil)
+	if err := temp.Execute(buf, p); err != nil {
+		return err
+	}
+	return afero.WriteFile(o.outFs, path.Join(o.PatchOutputDir, "manager_patch.yaml"), buf.Bytes(), 0644)
+}
+
+func toYAML(m map[string]string) (string, error) {
+	d, err := yaml.Marshal(m)
+	return string(d), err
+}
+
+func indent(n int, s string) (string, error) {
+	buf := bytes.NewBuffer(nil)
+	for _, elem := range strings.Split(s, "\n") {
+		for i := 0; i < n; i++ {
+			_, err := buf.WriteRune(' ')
+			if err != nil {
+				return "", err
+			}
+		}
+		_, err := buf.WriteString(elem)
+		if err != nil {
+			return "", err
+		}
+		_, err = buf.WriteRune('\n')
+		if err != nil {
+			return "", err
+		}
+	}
+	return strings.TrimRight(buf.String(), " \n"), nil
+}
diff --git a/vendor/sigs.k8s.io/controller-tools/pkg/webhook/parser.go b/vendor/sigs.k8s.io/controller-tools/pkg/webhook/parser.go
new file mode 100644
index 0000000000000000000000000000000000000000..9ac73cef045229f81a2d6b6f92f6ce3241661d70
--- /dev/null
+++ b/vendor/sigs.k8s.io/controller-tools/pkg/webhook/parser.go
@@ -0,0 +1,236 @@
+/*
+Copyright 2018 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package webhook
+
+import (
+	"errors"
+	"fmt"
+	"log"
+	"strconv"
+	"strings"
+
+	admissionregistrationv1beta1 "k8s.io/api/admissionregistration/v1beta1"
+	"k8s.io/apimachinery/pkg/types"
+	"k8s.io/apimachinery/pkg/util/sets"
+	"sigs.k8s.io/controller-tools/pkg/internal/general"
+)
+
+const webhookAnnotationPrefix = "kubebuilder:webhook"
+
+var (
+	webhookTags = sets.NewString([]string{"groups", "versions", "resources", "verbs", "type", "name", "path", "failure-policy"}...)
+	serverTags  = sets.NewString([]string{"port", "cert-dir", "service", "selector", "secret", "host", "mutating-webhook-config-name", "validating-webhook-config-name"}...)
+)
+
+// parseAnnotation parses webhook annotations.
+func (o *Options) parseAnnotation(commentText string) error {
+	webhookKVMap, serverKVMap := map[string]string{}, map[string]string{}
+	for _, comment := range strings.Split(commentText, "\n") {
+		comment := strings.TrimSpace(comment)
+		anno := general.GetAnnotation(comment, webhookAnnotationPrefix)
+		if len(anno) == 0 {
+			continue
+		}
+		for _, elem := range strings.Split(anno, ",") {
+			key, value, err := general.ParseKV(elem)
+			if err != nil {
+				log.Fatalf("// +kubebuilder:webhook: tags must be key value pairs. Example "+
+					"keys [groups=<group1;group2>,resources=<resource1;resource2>,verbs=<verb1;verb2>] "+
+					"Got string: [%s]", anno)
+			}
+			switch {
+			case webhookTags.Has(key):
+				webhookKVMap[key] = value
+			case serverTags.Has(key):
+				serverKVMap[key] = value
+			}
+		}
+	}
+
+	if err := o.parseWebhookAnnotation(webhookKVMap); err != nil {
+		return err
+	}
+	return o.parseServerAnnotation(serverKVMap)
+}
+
+// parseWebhookAnnotation parses webhook annotations in the same comment group.
+// nolint: gocyclo
+func (o *Options) parseWebhookAnnotation(kvMap map[string]string) error {
+	if len(kvMap) == 0 {
+		return nil
+	}
+	rule := admissionregistrationv1beta1.RuleWithOperations{}
+	w := &admissionWebhook{}
+	for key, value := range kvMap {
+		switch key {
+		case "groups":
+			values := strings.Split(value, ";")
+			normalized := []string{}
+			for _, v := range values {
+				if v == "core" {
+					normalized = append(normalized, "")
+				} else {
+					normalized = append(normalized, v)
+				}
+			}
+			// use the normalized groups so that "core" maps to the empty API group
+			rule.APIGroups = normalized
+
+		case "versions":
+			values := strings.Split(value, ";")
+			rule.APIVersions = values
+
+		case "resources":
+			values := strings.Split(value, ";")
+			rule.Resources = values
+
+		case "verbs":
+			values := strings.Split(value, ";")
+			var ops []admissionregistrationv1beta1.OperationType
+			for _, v := range values {
+				switch strings.ToLower(v) {
+				case strings.ToLower(string(admissionregistrationv1beta1.Create)):
+					ops = append(ops, admissionregistrationv1beta1.Create)
+				case strings.ToLower(string(admissionregistrationv1beta1.Update)):
+					ops = append(ops, admissionregistrationv1beta1.Update)
+				case strings.ToLower(string(admissionregistrationv1beta1.Delete)):
+					ops = append(ops, admissionregistrationv1beta1.Delete)
+				case strings.ToLower(string(admissionregistrationv1beta1.Connect)):
+					ops = append(ops, admissionregistrationv1beta1.Connect)
+				case strings.ToLower(string(admissionregistrationv1beta1.OperationAll)):
+					ops = append(ops, admissionregistrationv1beta1.OperationAll)
+				default:
+					return fmt.Errorf("unknown operation: %v", v)
+				}
+			}
+			rule.Operations = ops
+
+		case "type":
+			switch strings.ToLower(value) {
+			case "mutating":
+				w.typ = mutatingWebhook
+			case "validating":
+				w.typ = validatingWebhook
+			default:
+				return fmt.Errorf("unknown webhook type: %v", value)
+			}
+
+		case "name":
+			w.name = value
+
+		case "path":
+			w.path = value
+
+		case "failure-policy":
+			switch strings.ToLower(value) {
+			case strings.ToLower(string(admissionregistrationv1beta1.Ignore)):
+				fp := admissionregistrationv1beta1.Ignore
+				w.failurePolicy = &fp
+			case strings.ToLower(string(admissionregistrationv1beta1.Fail)):
+				fp := admissionregistrationv1beta1.Fail
+				w.failurePolicy = &fp
+			default:
+				return fmt.Errorf("unknown webhook failure policy: %v", value)
+			}
+		}
+	}
+	w.rules = []admissionregistrationv1beta1.RuleWithOperations{rule}
+	if o.webhooks == nil {
+		o.webhooks = map[string]webhook{}
+	}
+	o.webhooks[w.path] = w
+	return nil
+}
+
+// parseServerAnnotation parses webhook server annotations in the same comment group.
+// nolint: gocyclo
+func (o *Options) parseServerAnnotation(kvMap map[string]string) error {
+	if len(kvMap) == 0 {
+		return nil
+	}
+	for key, value := range kvMap {
+		switch key {
+		case "port":
+			port, err := strconv.Atoi(value)
+			if err != nil {
+				return err
+			}
+			o.port = int32(port)
+		case "cert-dir":
+			o.certDir = value
+		case "service":
+			// format: <service=namespace:name>
+			split := strings.Split(value, ":")
+			if len(split) != 2 || len(split[0]) == 0 || len(split[1]) == 0 {
+				return fmt.Errorf("invalid service format: expect <namespace:name>, but got %q", value)
+			}
+			if o.service == nil {
+				o.service = &service{}
+			}
+			o.service.namespace = split[0]
+			o.service.name = split[1]
+		case "selector":
+			// selector of the service. Format: <selector=label1:value1;label2:value2>
+			split := strings.Split(value, ";")
+			if len(split) == 0 {
+				return fmt.Errorf("invalid selector format: expect <label1:value1;label2:value2>, but got %q", value)
+			}
+			if o.service == nil {
+				o.service = &service{}
+			}
+			for _, v := range split {
+				l := strings.Split(v, ":")
+				if len(l) != 2 || len(l[0]) == 0 || len(l[1]) == 0 {
+					return fmt.Errorf("invalid selector format: expect <label1:value1;label2:value2>, but got %q", value)
+				}
+				if o.service.selectors == nil {
+					o.service.selectors = map[string]string{}
+				}
+				o.service.selectors[l[0]] = l[1]
+			}
+		case "host":
+			if len(value) == 0 {
+				return errors.New("host should not be empty if specified")
+			}
+			o.host = &value
+
+		case "mutating-webhook-config-name":
+			if len(value) == 0 {
+				return errors.New("mutating-webhook-config-name should not be empty if specified")
+			}
+			o.mutatingWebhookConfigName = value
+
+		case "validating-webhook-config-name":
+			if len(value) == 0 {
+				return errors.New("validating-webhook-config-name should not be empty if specified")
+			}
+			o.validatingWebhookConfigName = value
+
+		case "secret":
+			// format: <secret=namespace:name>
+			split := strings.Split(value, ":")
+			if len(split) != 2 || len(split[0]) == 0 || len(split[1]) == 0 {
+				return fmt.Errorf("invalid secret format: expect <namespace:name>, but got %q", value)
+			}
+			if o.secret == nil {
+				o.secret = &types.NamespacedName{}
+			}
+			o.secret.Namespace = split[0]
+			o.secret.Name = split[1]
+		}
+	}
+	return nil
+}
diff --git a/vendor/sigs.k8s.io/controller-tools/pkg/webhook/types.go b/vendor/sigs.k8s.io/controller-tools/pkg/webhook/types.go
new file mode 100644
index 0000000000000000000000000000000000000000..a7129e921c591b88cba706f337bf5a83873ea630
--- /dev/null
+++ b/vendor/sigs.k8s.io/controller-tools/pkg/webhook/types.go
@@ -0,0 +1,38 @@
+/*
+Copyright 2018 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package webhook
+
+// webhookType defines the type of a webhook
+type webhookType int
+
+const (
+	_ = iota
+	// mutatingWebhook represents the mutating type of webhook
+	mutatingWebhook webhookType = iota
+	// validatingWebhook represents the validating type of webhook
+	validatingWebhook
+)
+
+// webhook defines the basics that a webhook should support.
+type webhook interface {
+	// GetType returns the type of the webhook,
+	// e.g. mutating or validating.
+	GetType() webhookType
+	// Validate validates if the webhook itself is valid.
+	// If invalid, a non-nil error will be returned.
+	Validate() error
+}
diff --git a/vendor/sigs.k8s.io/controller-tools/pkg/webhook/writer.go b/vendor/sigs.k8s.io/controller-tools/pkg/webhook/writer.go
new file mode 100644
index 0000000000000000000000000000000000000000..ecbb45f2a11fd16a6f884213cb98517dd9edaa4a
--- /dev/null
+++ b/vendor/sigs.k8s.io/controller-tools/pkg/webhook/writer.go
@@ -0,0 +1,92 @@
+package webhook
+
+import (
+	"bytes"
+	"fmt"
+	"path"
+	"path/filepath"
+
+	"github.com/ghodss/yaml"
+	"github.com/spf13/afero"
+
+	"k8s.io/apimachinery/pkg/runtime"
+)
+
+// WriterOptions specifies the input and output.
+type WriterOptions struct {
+	InputDir       string
+	OutputDir      string
+	PatchOutputDir string
+
+	// inFs is the filesystem to be used for reading input
+	inFs afero.Fs
+	// outFs is the filesystem to be used for writing out the result
+	outFs afero.Fs
+}
+
+// SetDefaults sets up the default options for the webhook manifest generator.
+func (o *WriterOptions) SetDefaults() {
+	if o.inFs == nil {
+		o.inFs = afero.NewOsFs()
+	}
+	if o.outFs == nil {
+		o.outFs = afero.NewOsFs()
+	}
+
+	if len(o.InputDir) == 0 {
+		o.InputDir = filepath.Join(".", "pkg", "webhook")
+	}
+	if len(o.OutputDir) == 0 {
+		o.OutputDir = filepath.Join(".", "config", "webhook")
+	}
+	if len(o.PatchOutputDir) == 0 {
+		o.PatchOutputDir = filepath.Join(".", "config", "default")
+	}
+}
+
+// Validate validates the input options.
+func (o *WriterOptions) Validate() error {
+	if _, err := o.inFs.Stat(o.InputDir); err != nil {
+		return fmt.Errorf("invalid input directory '%s': %v", o.InputDir, err)
+	}
+	return nil
+}
+
+// WriteObjectsToDisk writes objects to the location specified in WriterOptions.
+func (o *WriterOptions) WriteObjectsToDisk(objects ...runtime.Object) error {
+	exists, err := afero.DirExists(o.outFs, o.OutputDir)
+	if err != nil {
+		return err
+	}
+	if !exists {
+		err = o.outFs.MkdirAll(o.OutputDir, 0766)
+		if err != nil {
+			return err
+		}
+	}
+
+	var buf bytes.Buffer
+	isFirstObject := true
+	for _, obj := range objects {
+		if !isFirstObject {
+			_, err = buf.WriteString("---\n")
+			if err != nil {
+				return err
+			}
+		}
+		marshalled, err := yaml.Marshal(obj)
+		if err != nil {
+			return err
+		}
+		_, err = buf.Write(marshalled)
+		if err != nil {
+			return err
+		}
+		isFirstObject = false
+	}
+	err = afero.WriteFile(o.outFs, path.Join(o.OutputDir, "webhookmanifests.yaml"), buf.Bytes(), 0644)
+	if err != nil {
+		return err
+	}
+	return nil
+}
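
For orientation, a minimal sketch of driving the vendored RBAC generator directly (the `controller-gen` binary wired into `make generate` builds on these packages; the input path below is hypothetical):

```go
package main

import (
	"log"

	"sigs.k8s.io/controller-tools/pkg/rbac"
)

func main() {
	opts := &rbac.ManifestOptions{}
	// Defaults: Name "manager", InputDir ./pkg, OutputDir ./config/rbac,
	// ServiceAccount "default", Namespace "system".
	opts.SetDefaults()
	// Hypothetical input directory; any tree containing Go files with
	// +kubebuilder:rbac annotations works.
	opts.InputDir = "./pkg/apis"
	// Writes manager_role.yaml and manager_role_binding.yaml under OutputDir.
	if err := rbac.Generate(opts); err != nil {
		log.Fatalf("RBAC generation failed: %v", err)
	}
}
```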